drm/i915: Convert cdclk to global state
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / display / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/dma-resv.h>
33 #include <linux/slab.h>
34
35 #include <drm/drm_atomic.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_atomic_uapi.h>
38 #include <drm/drm_dp_helper.h>
39 #include <drm/drm_edid.h>
40 #include <drm/drm_fourcc.h>
41 #include <drm/drm_plane_helper.h>
42 #include <drm/drm_probe_helper.h>
43 #include <drm/drm_rect.h>
44 #include <drm/i915_drm.h>
45
46 #include "display/intel_crt.h"
47 #include "display/intel_ddi.h"
48 #include "display/intel_dp.h"
49 #include "display/intel_dp_mst.h"
50 #include "display/intel_dsi.h"
51 #include "display/intel_dvo.h"
52 #include "display/intel_gmbus.h"
53 #include "display/intel_hdmi.h"
54 #include "display/intel_lvds.h"
55 #include "display/intel_sdvo.h"
56 #include "display/intel_tv.h"
57 #include "display/intel_vdsc.h"
58
59 #include "gt/intel_rps.h"
60
61 #include "i915_drv.h"
62 #include "i915_trace.h"
63 #include "intel_acpi.h"
64 #include "intel_atomic.h"
65 #include "intel_atomic_plane.h"
66 #include "intel_bw.h"
67 #include "intel_cdclk.h"
68 #include "intel_color.h"
69 #include "intel_display_types.h"
70 #include "intel_dp_link_training.h"
71 #include "intel_fbc.h"
72 #include "intel_fbdev.h"
73 #include "intel_fifo_underrun.h"
74 #include "intel_frontbuffer.h"
75 #include "intel_hdcp.h"
76 #include "intel_hotplug.h"
77 #include "intel_overlay.h"
78 #include "intel_pipe_crc.h"
79 #include "intel_pm.h"
80 #include "intel_psr.h"
81 #include "intel_quirks.h"
82 #include "intel_sideband.h"
83 #include "intel_sprite.h"
84 #include "intel_tc.h"
85 #include "intel_vga.h"
86
87 /* Primary plane formats for gen <= 3 */
88 static const u32 i8xx_primary_formats[] = {
89         DRM_FORMAT_C8,
90         DRM_FORMAT_XRGB1555,
91         DRM_FORMAT_RGB565,
92         DRM_FORMAT_XRGB8888,
93 };
94
95 /* Primary plane formats for ivb (no fp16 due to hw issue) */
96 static const u32 ivb_primary_formats[] = {
97         DRM_FORMAT_C8,
98         DRM_FORMAT_RGB565,
99         DRM_FORMAT_XRGB8888,
100         DRM_FORMAT_XBGR8888,
101         DRM_FORMAT_XRGB2101010,
102         DRM_FORMAT_XBGR2101010,
103 };
104
105 /* Primary plane formats for gen >= 4, except ivb */
106 static const u32 i965_primary_formats[] = {
107         DRM_FORMAT_C8,
108         DRM_FORMAT_RGB565,
109         DRM_FORMAT_XRGB8888,
110         DRM_FORMAT_XBGR8888,
111         DRM_FORMAT_XRGB2101010,
112         DRM_FORMAT_XBGR2101010,
113         DRM_FORMAT_XBGR16161616F,
114 };
115
116 /* Primary plane formats for vlv/chv */
117 static const u32 vlv_primary_formats[] = {
118         DRM_FORMAT_C8,
119         DRM_FORMAT_RGB565,
120         DRM_FORMAT_XRGB8888,
121         DRM_FORMAT_XBGR8888,
122         DRM_FORMAT_ARGB8888,
123         DRM_FORMAT_ABGR8888,
124         DRM_FORMAT_XRGB2101010,
125         DRM_FORMAT_XBGR2101010,
126         DRM_FORMAT_ARGB2101010,
127         DRM_FORMAT_ABGR2101010,
128         DRM_FORMAT_XBGR16161616F,
129 };
130
131 static const u64 i9xx_format_modifiers[] = {
132         I915_FORMAT_MOD_X_TILED,
133         DRM_FORMAT_MOD_LINEAR,
134         DRM_FORMAT_MOD_INVALID
135 };
136
137 /* Cursor formats */
138 static const u32 intel_cursor_formats[] = {
139         DRM_FORMAT_ARGB8888,
140 };
141
142 static const u64 cursor_format_modifiers[] = {
143         DRM_FORMAT_MOD_LINEAR,
144         DRM_FORMAT_MOD_INVALID
145 };
146
147 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
148                                 struct intel_crtc_state *pipe_config);
149 static void ilk_pch_clock_get(struct intel_crtc *crtc,
150                               struct intel_crtc_state *pipe_config);
151
152 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
153                                   struct drm_i915_gem_object *obj,
154                                   struct drm_mode_fb_cmd2 *mode_cmd);
155 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
156 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
157 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
158                                          const struct intel_link_m_n *m_n,
159                                          const struct intel_link_m_n *m2_n2);
160 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
161 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
162 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
163 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
164 static void vlv_prepare_pll(struct intel_crtc *crtc,
165                             const struct intel_crtc_state *pipe_config);
166 static void chv_prepare_pll(struct intel_crtc *crtc,
167                             const struct intel_crtc_state *pipe_config);
168 static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
169 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
170 static void intel_modeset_setup_hw_state(struct drm_device *dev,
171                                          struct drm_modeset_acquire_ctx *ctx);
172 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
173
/*
 * Divider limits for a DPLL: min/max range for each divider stage, plus
 * the dot clock threshold that selects between the slow and fast p2 values.
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
184
185 /* returns HPLL frequency in kHz */
186 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
187 {
188         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
189
190         /* Obtain SKU information */
191         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
192                 CCK_FUSE_HPLL_FREQ_MASK;
193
194         return vco_freq[hpll_freq] * 1000;
195 }
196
197 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
198                       const char *name, u32 reg, int ref_freq)
199 {
200         u32 val;
201         int divider;
202
203         val = vlv_cck_read(dev_priv, reg);
204         divider = val & CCK_FREQUENCY_VALUES;
205
206         WARN((val & CCK_FREQUENCY_STATUS) !=
207              (divider << CCK_FREQUENCY_STATUS_SHIFT),
208              "%s change in progress\n", name);
209
210         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
211 }
212
213 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
214                            const char *name, u32 reg)
215 {
216         int hpll;
217
218         vlv_cck_get(dev_priv);
219
220         if (dev_priv->hpll_freq == 0)
221                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
222
223         hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
224
225         vlv_cck_put(dev_priv);
226
227         return hpll;
228 }
229
230 static void intel_update_czclk(struct drm_i915_private *dev_priv)
231 {
232         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
233                 return;
234
235         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
236                                                       CCK_CZ_CLOCK_CONTROL);
237
238         drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
239                 dev_priv->czclk_freq);
240 }
241
242 static inline u32 /* units of 100MHz */
243 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
244                     const struct intel_crtc_state *pipe_config)
245 {
246         if (HAS_DDI(dev_priv))
247                 return pipe_config->port_clock; /* SPLL */
248         else
249                 return dev_priv->fdi_pll_freq;
250 }
251
252 static const struct intel_limit intel_limits_i8xx_dac = {
253         .dot = { .min = 25000, .max = 350000 },
254         .vco = { .min = 908000, .max = 1512000 },
255         .n = { .min = 2, .max = 16 },
256         .m = { .min = 96, .max = 140 },
257         .m1 = { .min = 18, .max = 26 },
258         .m2 = { .min = 6, .max = 16 },
259         .p = { .min = 4, .max = 128 },
260         .p1 = { .min = 2, .max = 33 },
261         .p2 = { .dot_limit = 165000,
262                 .p2_slow = 4, .p2_fast = 2 },
263 };
264
265 static const struct intel_limit intel_limits_i8xx_dvo = {
266         .dot = { .min = 25000, .max = 350000 },
267         .vco = { .min = 908000, .max = 1512000 },
268         .n = { .min = 2, .max = 16 },
269         .m = { .min = 96, .max = 140 },
270         .m1 = { .min = 18, .max = 26 },
271         .m2 = { .min = 6, .max = 16 },
272         .p = { .min = 4, .max = 128 },
273         .p1 = { .min = 2, .max = 33 },
274         .p2 = { .dot_limit = 165000,
275                 .p2_slow = 4, .p2_fast = 4 },
276 };
277
278 static const struct intel_limit intel_limits_i8xx_lvds = {
279         .dot = { .min = 25000, .max = 350000 },
280         .vco = { .min = 908000, .max = 1512000 },
281         .n = { .min = 2, .max = 16 },
282         .m = { .min = 96, .max = 140 },
283         .m1 = { .min = 18, .max = 26 },
284         .m2 = { .min = 6, .max = 16 },
285         .p = { .min = 4, .max = 128 },
286         .p1 = { .min = 1, .max = 6 },
287         .p2 = { .dot_limit = 165000,
288                 .p2_slow = 14, .p2_fast = 7 },
289 };
290
291 static const struct intel_limit intel_limits_i9xx_sdvo = {
292         .dot = { .min = 20000, .max = 400000 },
293         .vco = { .min = 1400000, .max = 2800000 },
294         .n = { .min = 1, .max = 6 },
295         .m = { .min = 70, .max = 120 },
296         .m1 = { .min = 8, .max = 18 },
297         .m2 = { .min = 3, .max = 7 },
298         .p = { .min = 5, .max = 80 },
299         .p1 = { .min = 1, .max = 8 },
300         .p2 = { .dot_limit = 200000,
301                 .p2_slow = 10, .p2_fast = 5 },
302 };
303
304 static const struct intel_limit intel_limits_i9xx_lvds = {
305         .dot = { .min = 20000, .max = 400000 },
306         .vco = { .min = 1400000, .max = 2800000 },
307         .n = { .min = 1, .max = 6 },
308         .m = { .min = 70, .max = 120 },
309         .m1 = { .min = 8, .max = 18 },
310         .m2 = { .min = 3, .max = 7 },
311         .p = { .min = 7, .max = 98 },
312         .p1 = { .min = 1, .max = 8 },
313         .p2 = { .dot_limit = 112000,
314                 .p2_slow = 14, .p2_fast = 7 },
315 };
316
317
318 static const struct intel_limit intel_limits_g4x_sdvo = {
319         .dot = { .min = 25000, .max = 270000 },
320         .vco = { .min = 1750000, .max = 3500000},
321         .n = { .min = 1, .max = 4 },
322         .m = { .min = 104, .max = 138 },
323         .m1 = { .min = 17, .max = 23 },
324         .m2 = { .min = 5, .max = 11 },
325         .p = { .min = 10, .max = 30 },
326         .p1 = { .min = 1, .max = 3},
327         .p2 = { .dot_limit = 270000,
328                 .p2_slow = 10,
329                 .p2_fast = 10
330         },
331 };
332
333 static const struct intel_limit intel_limits_g4x_hdmi = {
334         .dot = { .min = 22000, .max = 400000 },
335         .vco = { .min = 1750000, .max = 3500000},
336         .n = { .min = 1, .max = 4 },
337         .m = { .min = 104, .max = 138 },
338         .m1 = { .min = 16, .max = 23 },
339         .m2 = { .min = 5, .max = 11 },
340         .p = { .min = 5, .max = 80 },
341         .p1 = { .min = 1, .max = 8},
342         .p2 = { .dot_limit = 165000,
343                 .p2_slow = 10, .p2_fast = 5 },
344 };
345
346 static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
347         .dot = { .min = 20000, .max = 115000 },
348         .vco = { .min = 1750000, .max = 3500000 },
349         .n = { .min = 1, .max = 3 },
350         .m = { .min = 104, .max = 138 },
351         .m1 = { .min = 17, .max = 23 },
352         .m2 = { .min = 5, .max = 11 },
353         .p = { .min = 28, .max = 112 },
354         .p1 = { .min = 2, .max = 8 },
355         .p2 = { .dot_limit = 0,
356                 .p2_slow = 14, .p2_fast = 14
357         },
358 };
359
360 static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
361         .dot = { .min = 80000, .max = 224000 },
362         .vco = { .min = 1750000, .max = 3500000 },
363         .n = { .min = 1, .max = 3 },
364         .m = { .min = 104, .max = 138 },
365         .m1 = { .min = 17, .max = 23 },
366         .m2 = { .min = 5, .max = 11 },
367         .p = { .min = 14, .max = 42 },
368         .p1 = { .min = 2, .max = 6 },
369         .p2 = { .dot_limit = 0,
370                 .p2_slow = 7, .p2_fast = 7
371         },
372 };
373
374 static const struct intel_limit pnv_limits_sdvo = {
375         .dot = { .min = 20000, .max = 400000},
376         .vco = { .min = 1700000, .max = 3500000 },
377         /* Pineview's Ncounter is a ring counter */
378         .n = { .min = 3, .max = 6 },
379         .m = { .min = 2, .max = 256 },
380         /* Pineview only has one combined m divider, which we treat as m2. */
381         .m1 = { .min = 0, .max = 0 },
382         .m2 = { .min = 0, .max = 254 },
383         .p = { .min = 5, .max = 80 },
384         .p1 = { .min = 1, .max = 8 },
385         .p2 = { .dot_limit = 200000,
386                 .p2_slow = 10, .p2_fast = 5 },
387 };
388
389 static const struct intel_limit pnv_limits_lvds = {
390         .dot = { .min = 20000, .max = 400000 },
391         .vco = { .min = 1700000, .max = 3500000 },
392         .n = { .min = 3, .max = 6 },
393         .m = { .min = 2, .max = 256 },
394         .m1 = { .min = 0, .max = 0 },
395         .m2 = { .min = 0, .max = 254 },
396         .p = { .min = 7, .max = 112 },
397         .p1 = { .min = 1, .max = 8 },
398         .p2 = { .dot_limit = 112000,
399                 .p2_slow = 14, .p2_fast = 14 },
400 };
401
402 /* Ironlake / Sandybridge
403  *
404  * We calculate clock using (register_value + 2) for N/M1/M2, so here
405  * the range value for them is (actual_value - 2).
406  */
407 static const struct intel_limit ilk_limits_dac = {
408         .dot = { .min = 25000, .max = 350000 },
409         .vco = { .min = 1760000, .max = 3510000 },
410         .n = { .min = 1, .max = 5 },
411         .m = { .min = 79, .max = 127 },
412         .m1 = { .min = 12, .max = 22 },
413         .m2 = { .min = 5, .max = 9 },
414         .p = { .min = 5, .max = 80 },
415         .p1 = { .min = 1, .max = 8 },
416         .p2 = { .dot_limit = 225000,
417                 .p2_slow = 10, .p2_fast = 5 },
418 };
419
420 static const struct intel_limit ilk_limits_single_lvds = {
421         .dot = { .min = 25000, .max = 350000 },
422         .vco = { .min = 1760000, .max = 3510000 },
423         .n = { .min = 1, .max = 3 },
424         .m = { .min = 79, .max = 118 },
425         .m1 = { .min = 12, .max = 22 },
426         .m2 = { .min = 5, .max = 9 },
427         .p = { .min = 28, .max = 112 },
428         .p1 = { .min = 2, .max = 8 },
429         .p2 = { .dot_limit = 225000,
430                 .p2_slow = 14, .p2_fast = 14 },
431 };
432
433 static const struct intel_limit ilk_limits_dual_lvds = {
434         .dot = { .min = 25000, .max = 350000 },
435         .vco = { .min = 1760000, .max = 3510000 },
436         .n = { .min = 1, .max = 3 },
437         .m = { .min = 79, .max = 127 },
438         .m1 = { .min = 12, .max = 22 },
439         .m2 = { .min = 5, .max = 9 },
440         .p = { .min = 14, .max = 56 },
441         .p1 = { .min = 2, .max = 8 },
442         .p2 = { .dot_limit = 225000,
443                 .p2_slow = 7, .p2_fast = 7 },
444 };
445
446 /* LVDS 100mhz refclk limits. */
447 static const struct intel_limit ilk_limits_single_lvds_100m = {
448         .dot = { .min = 25000, .max = 350000 },
449         .vco = { .min = 1760000, .max = 3510000 },
450         .n = { .min = 1, .max = 2 },
451         .m = { .min = 79, .max = 126 },
452         .m1 = { .min = 12, .max = 22 },
453         .m2 = { .min = 5, .max = 9 },
454         .p = { .min = 28, .max = 112 },
455         .p1 = { .min = 2, .max = 8 },
456         .p2 = { .dot_limit = 225000,
457                 .p2_slow = 14, .p2_fast = 14 },
458 };
459
460 static const struct intel_limit ilk_limits_dual_lvds_100m = {
461         .dot = { .min = 25000, .max = 350000 },
462         .vco = { .min = 1760000, .max = 3510000 },
463         .n = { .min = 1, .max = 3 },
464         .m = { .min = 79, .max = 126 },
465         .m1 = { .min = 12, .max = 22 },
466         .m2 = { .min = 5, .max = 9 },
467         .p = { .min = 14, .max = 42 },
468         .p1 = { .min = 2, .max = 6 },
469         .p2 = { .dot_limit = 225000,
470                 .p2_slow = 7, .p2_fast = 7 },
471 };
472
473 static const struct intel_limit intel_limits_vlv = {
474          /*
475           * These are the data rate limits (measured in fast clocks)
476           * since those are the strictest limits we have. The fast
477           * clock and actual rate limits are more relaxed, so checking
478           * them would make no difference.
479           */
480         .dot = { .min = 25000 * 5, .max = 270000 * 5 },
481         .vco = { .min = 4000000, .max = 6000000 },
482         .n = { .min = 1, .max = 7 },
483         .m1 = { .min = 2, .max = 3 },
484         .m2 = { .min = 11, .max = 156 },
485         .p1 = { .min = 2, .max = 3 },
486         .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
487 };
488
489 static const struct intel_limit intel_limits_chv = {
490         /*
491          * These are the data rate limits (measured in fast clocks)
492          * since those are the strictest limits we have.  The fast
493          * clock and actual rate limits are more relaxed, so checking
494          * them would make no difference.
495          */
496         .dot = { .min = 25000 * 5, .max = 540000 * 5},
497         .vco = { .min = 4800000, .max = 6480000 },
498         .n = { .min = 1, .max = 1 },
499         .m1 = { .min = 2, .max = 2 },
500         .m2 = { .min = 24 << 22, .max = 175 << 22 },
501         .p1 = { .min = 2, .max = 4 },
502         .p2 = { .p2_slow = 1, .p2_fast = 14 },
503 };
504
505 static const struct intel_limit intel_limits_bxt = {
506         /* FIXME: find real dot limits */
507         .dot = { .min = 0, .max = INT_MAX },
508         .vco = { .min = 4800000, .max = 6700000 },
509         .n = { .min = 1, .max = 1 },
510         .m1 = { .min = 2, .max = 2 },
511         /* FIXME: find real m2 limits */
512         .m2 = { .min = 2 << 22, .max = 255 << 22 },
513         .p1 = { .min = 2, .max = 4 },
514         .p2 = { .p2_slow = 1, .p2_fast = 20 },
515 };
516
517 /* WA Display #0827: Gen9:all */
518 static void
519 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
520 {
521         if (enable)
522                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
523                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
524         else
525                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
526                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
527 }
528
529 /* Wa_2006604312:icl */
530 static void
531 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
532                        bool enable)
533 {
534         if (enable)
535                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
536                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
537         else
538                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
539                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
540 }
541
542 static bool
543 needs_modeset(const struct intel_crtc_state *state)
544 {
545         return drm_atomic_crtc_needs_modeset(&state->uapi);
546 }
547
548 bool
549 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
550 {
551         return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
552                 crtc_state->sync_mode_slaves_mask);
553 }
554
555 static bool
556 is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
557 {
558         return crtc_state->master_transcoder != INVALID_TRANSCODER;
559 }
560
561 /*
562  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
563  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
564  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
565  * The helpers' return value is the rate of the clock that is fed to the
566  * display engine's pipe which can be the above fast dot clock rate or a
567  * divided-down version of it.
568  */
569 /* m1 is reserved as 0 in Pineview, n is a ring counter */
570 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
571 {
572         clock->m = clock->m2 + 2;
573         clock->p = clock->p1 * clock->p2;
574         if (WARN_ON(clock->n == 0 || clock->p == 0))
575                 return 0;
576         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
577         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
578
579         return clock->dot;
580 }
581
582 static u32 i9xx_dpll_compute_m(struct dpll *dpll)
583 {
584         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
585 }
586
587 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
588 {
589         clock->m = i9xx_dpll_compute_m(clock);
590         clock->p = clock->p1 * clock->p2;
591         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
592                 return 0;
593         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
594         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
595
596         return clock->dot;
597 }
598
599 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
600 {
601         clock->m = clock->m1 * clock->m2;
602         clock->p = clock->p1 * clock->p2;
603         if (WARN_ON(clock->n == 0 || clock->p == 0))
604                 return 0;
605         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
606         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
607
608         return clock->dot / 5;
609 }
610
611 int chv_calc_dpll_params(int refclk, struct dpll *clock)
612 {
613         clock->m = clock->m1 * clock->m2;
614         clock->p = clock->p1 * clock->p2;
615         if (WARN_ON(clock->n == 0 || clock->p == 0))
616                 return 0;
617         clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
618                                            clock->n << 22);
619         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
620
621         return clock->dot / 5;
622 }
623
624 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
625
626 /*
627  * Returns whether the given set of divisors are valid for a given refclk with
628  * the given connectors.
629  */
630 static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
631                                const struct intel_limit *limit,
632                                const struct dpll *clock)
633 {
634         if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
635                 INTELPllInvalid("n out of range\n");
636         if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
637                 INTELPllInvalid("p1 out of range\n");
638         if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
639                 INTELPllInvalid("m2 out of range\n");
640         if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
641                 INTELPllInvalid("m1 out of range\n");
642
643         if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
644             !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
645                 if (clock->m1 <= clock->m2)
646                         INTELPllInvalid("m1 <= m2\n");
647
648         if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
649             !IS_GEN9_LP(dev_priv)) {
650                 if (clock->p < limit->p.min || limit->p.max < clock->p)
651                         INTELPllInvalid("p out of range\n");
652                 if (clock->m < limit->m.min || limit->m.max < clock->m)
653                         INTELPllInvalid("m out of range\n");
654         }
655
656         if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
657                 INTELPllInvalid("vco out of range\n");
658         /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
659          * connector, etc., rather than just a single range.
660          */
661         if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
662                 INTELPllInvalid("dot out of range\n");
663
664         return true;
665 }
666
667 static int
668 i9xx_select_p2_div(const struct intel_limit *limit,
669                    const struct intel_crtc_state *crtc_state,
670                    int target)
671 {
672         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
673
674         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
675                 /*
676                  * For LVDS just rely on its current settings for dual-channel.
677                  * We haven't figured out how to reliably set up different
678                  * single/dual channel state, if we even can.
679                  */
680                 if (intel_is_dual_link_lvds(dev_priv))
681                         return limit->p2.p2_fast;
682                 else
683                         return limit->p2.p2_slow;
684         } else {
685                 if (target < limit->p2.dot_limit)
686                         return limit->p2.p2_slow;
687                 else
688                         return limit->p2.p2_fast;
689         }
690 }
691
692 /*
693  * Returns a set of divisors for the desired target clock with the given
694  * refclk, or FALSE.  The returned values represent the clock equation:
695  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
696  *
697  * Target and reference clocks are specified in kHz.
698  *
699  * If match_clock is provided, then best_clock P divider must match the P
700  * divider from @match_clock used for LVDS downclocking.
701  */
702 static bool
703 i9xx_find_best_dpll(const struct intel_limit *limit,
704                     struct intel_crtc_state *crtc_state,
705                     int target, int refclk, struct dpll *match_clock,
706                     struct dpll *best_clock)
707 {
708         struct drm_device *dev = crtc_state->uapi.crtc->dev;
709         struct dpll clock;
710         int err = target;
711
712         memset(best_clock, 0, sizeof(*best_clock));
713
714         clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
715
716         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
717              clock.m1++) {
718                 for (clock.m2 = limit->m2.min;
719                      clock.m2 <= limit->m2.max; clock.m2++) {
720                         if (clock.m2 >= clock.m1)
721                                 break;
722                         for (clock.n = limit->n.min;
723                              clock.n <= limit->n.max; clock.n++) {
724                                 for (clock.p1 = limit->p1.min;
725                                         clock.p1 <= limit->p1.max; clock.p1++) {
726                                         int this_err;
727
728                                         i9xx_calc_dpll_params(refclk, &clock);
729                                         if (!intel_PLL_is_valid(to_i915(dev),
730                                                                 limit,
731                                                                 &clock))
732                                                 continue;
733                                         if (match_clock &&
734                                             clock.p != match_clock->p)
735                                                 continue;
736
737                                         this_err = abs(clock.dot - target);
738                                         if (this_err < err) {
739                                                 *best_clock = clock;
740                                                 err = this_err;
741                                         }
742                                 }
743                         }
744                 }
745         }
746
747         return (err != target);
748 }
749
750 /*
751  * Returns a set of divisors for the desired target clock with the given
752  * refclk, or FALSE.  The returned values represent the clock equation:
753  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
754  *
755  * Target and reference clocks are specified in kHz.
756  *
757  * If match_clock is provided, then best_clock P divider must match the P
758  * divider from @match_clock used for LVDS downclocking.
759  */
760 static bool
761 pnv_find_best_dpll(const struct intel_limit *limit,
762                    struct intel_crtc_state *crtc_state,
763                    int target, int refclk, struct dpll *match_clock,
764                    struct dpll *best_clock)
765 {
766         struct drm_device *dev = crtc_state->uapi.crtc->dev;
767         struct dpll clock;
768         int err = target;
769
770         memset(best_clock, 0, sizeof(*best_clock));
771
772         clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
773
774         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
775              clock.m1++) {
776                 for (clock.m2 = limit->m2.min;
777                      clock.m2 <= limit->m2.max; clock.m2++) {
778                         for (clock.n = limit->n.min;
779                              clock.n <= limit->n.max; clock.n++) {
780                                 for (clock.p1 = limit->p1.min;
781                                         clock.p1 <= limit->p1.max; clock.p1++) {
782                                         int this_err;
783
784                                         pnv_calc_dpll_params(refclk, &clock);
785                                         if (!intel_PLL_is_valid(to_i915(dev),
786                                                                 limit,
787                                                                 &clock))
788                                                 continue;
789                                         if (match_clock &&
790                                             clock.p != match_clock->p)
791                                                 continue;
792
793                                         this_err = abs(clock.dot - target);
794                                         if (this_err < err) {
795                                                 *best_clock = clock;
796                                                 err = this_err;
797                                         }
798                                 }
799                         }
800                 }
801         }
802
803         return (err != target);
804 }
805
806 /*
807  * Returns a set of divisors for the desired target clock with the given
808  * refclk, or FALSE.  The returned values represent the clock equation:
809  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
810  *
811  * Target and reference clocks are specified in kHz.
812  *
813  * If match_clock is provided, then best_clock P divider must match the P
814  * divider from @match_clock used for LVDS downclocking.
815  */
816 static bool
817 g4x_find_best_dpll(const struct intel_limit *limit,
818                    struct intel_crtc_state *crtc_state,
819                    int target, int refclk, struct dpll *match_clock,
820                    struct dpll *best_clock)
821 {
822         struct drm_device *dev = crtc_state->uapi.crtc->dev;
823         struct dpll clock;
824         int max_n;
825         bool found = false;
826         /* approximately equals target * 0.00585 */
827         int err_most = (target >> 8) + (target >> 9);
828
829         memset(best_clock, 0, sizeof(*best_clock));
830
831         clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
832
833         max_n = limit->n.max;
834         /* based on hardware requirement, prefer smaller n to precision */
835         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
836                 /* based on hardware requirement, prefere larger m1,m2 */
837                 for (clock.m1 = limit->m1.max;
838                      clock.m1 >= limit->m1.min; clock.m1--) {
839                         for (clock.m2 = limit->m2.max;
840                              clock.m2 >= limit->m2.min; clock.m2--) {
841                                 for (clock.p1 = limit->p1.max;
842                                      clock.p1 >= limit->p1.min; clock.p1--) {
843                                         int this_err;
844
845                                         i9xx_calc_dpll_params(refclk, &clock);
846                                         if (!intel_PLL_is_valid(to_i915(dev),
847                                                                 limit,
848                                                                 &clock))
849                                                 continue;
850
851                                         this_err = abs(clock.dot - target);
852                                         if (this_err < err_most) {
853                                                 *best_clock = clock;
854                                                 err_most = this_err;
855                                                 max_n = clock.n;
856                                                 found = true;
857                                         }
858                                 }
859                         }
860                 }
861         }
862         return found;
863 }
864
865 /*
866  * Check if the calculated PLL configuration is more optimal compared to the
867  * best configuration and error found so far. Return the calculated error.
868  */
869 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
870                                const struct dpll *calculated_clock,
871                                const struct dpll *best_clock,
872                                unsigned int best_error_ppm,
873                                unsigned int *error_ppm)
874 {
875         /*
876          * For CHV ignore the error and consider only the P value.
877          * Prefer a bigger P value based on HW requirements.
878          */
879         if (IS_CHERRYVIEW(to_i915(dev))) {
880                 *error_ppm = 0;
881
882                 return calculated_clock->p > best_clock->p;
883         }
884
885         if (WARN_ON_ONCE(!target_freq))
886                 return false;
887
888         *error_ppm = div_u64(1000000ULL *
889                                 abs(target_freq - calculated_clock->dot),
890                              target_freq);
891         /*
892          * Prefer a better P value over a better (smaller) error if the error
893          * is small. Ensure this preference for future configurations too by
894          * setting the error to 0.
895          */
896         if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
897                 *error_ppm = 0;
898
899                 return true;
900         }
901
902         return *error_ppm + 10 < best_error_ppm;
903 }
904
905 /*
906  * Returns a set of divisors for the desired target clock with the given
907  * refclk, or FALSE.  The returned values represent the clock equation:
908  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
909  */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/*
					 * Derive m2 directly from the other dividers so the
					 * resulting dot clock lands as close to target as possible.
					 */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					/* Keep only candidates that beat the best found so far. */
					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
964
965 /*
966  * Returns a set of divisors for the desired target clock with the given
967  * refclk, or FALSE.  The returned values represent the clock equation:
968  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
969  */
970 static bool
971 chv_find_best_dpll(const struct intel_limit *limit,
972                    struct intel_crtc_state *crtc_state,
973                    int target, int refclk, struct dpll *match_clock,
974                    struct dpll *best_clock)
975 {
976         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
977         struct drm_device *dev = crtc->base.dev;
978         unsigned int best_error_ppm;
979         struct dpll clock;
980         u64 m2;
981         int found = false;
982
983         memset(best_clock, 0, sizeof(*best_clock));
984         best_error_ppm = 1000000;
985
986         /*
987          * Based on hardware doc, the n always set to 1, and m1 always
988          * set to 2.  If requires to support 200Mhz refclk, we need to
989          * revisit this because n may not 1 anymore.
990          */
991         clock.n = 1, clock.m1 = 2;
992         target *= 5;    /* fast clock */
993
994         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
995                 for (clock.p2 = limit->p2.p2_fast;
996                                 clock.p2 >= limit->p2.p2_slow;
997                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
998                         unsigned int error_ppm;
999
1000                         clock.p = clock.p1 * clock.p2;
1001
1002                         m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
1003                                                    refclk * clock.m1);
1004
1005                         if (m2 > INT_MAX/clock.m1)
1006                                 continue;
1007
1008                         clock.m2 = m2;
1009
1010                         chv_calc_dpll_params(refclk, &clock);
1011
1012                         if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
1013                                 continue;
1014
1015                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1016                                                 best_error_ppm, &error_ppm))
1017                                 continue;
1018
1019                         *best_clock = clock;
1020                         best_error_ppm = error_ppm;
1021                         found = true;
1022                 }
1023         }
1024
1025         return found;
1026 }
1027
1028 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
1029                         struct dpll *best_clock)
1030 {
1031         int refclk = 100000;
1032         const struct intel_limit *limit = &intel_limits_bxt;
1033
1034         return chv_find_best_dpll(limit, crtc_state,
1035                                   crtc_state->port_clock, refclk,
1036                                   NULL, best_clock);
1037 }
1038
1039 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1040                                     enum pipe pipe)
1041 {
1042         i915_reg_t reg = PIPEDSL(pipe);
1043         u32 line1, line2;
1044         u32 line_mask;
1045
1046         if (IS_GEN(dev_priv, 2))
1047                 line_mask = DSL_LINEMASK_GEN2;
1048         else
1049                 line_mask = DSL_LINEMASK_GEN3;
1050
1051         line1 = intel_de_read(dev_priv, reg) & line_mask;
1052         msleep(5);
1053         line2 = intel_de_read(dev_priv, reg) & line_mask;
1054
1055         return line1 != line2;
1056 }
1057
/* Wait (up to 100 ms) for the pipe scanline to reach the requested moving state. */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), onoff(state));
}
1069
/* Wait for the pipe's scanline counter to stop advancing. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
1074
/* Wait for the pipe's scanline counter to start advancing. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
1079
/* Wait for the pipe to actually shut off after it has been disabled. */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/* Pre-gen4: no pipe-state bit, so watch the scanline instead. */
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
1098
1099 /* Only for pre-ILK configs */
/* Only for pre-ILK configs */
/* Warn if the pipe's DPLL enable bit does not match the expected state. */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1112
1113 /* XXX: the dsi pll is shared between MIPI DSI ports */
/* XXX: the dsi pll is shared between MIPI DSI ports */
/* Warn if the DSI PLL VCO enable bit (read via CCK sideband) mismatches @state. */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	/* CCK sideband access must be bracketed by get/put */
	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1128
/* Warn if the FDI TX enable state for @pipe does not match @state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		u32 val = intel_de_read(dev_priv,
					TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1153 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1154 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1155
1156 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1157                           enum pipe pipe, bool state)
1158 {
1159         u32 val;
1160         bool cur_state;
1161
1162         val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
1163         cur_state = !!(val & FDI_RX_ENABLE);
1164         I915_STATE_WARN(cur_state != state,
1165              "FDI RX state assertion failure (expected %s, current %s)\n",
1166                         onoff(state), onoff(cur_state));
1167 }
1168 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1169 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1170
/* Warn if the FDI TX PLL for @pipe is not enabled (where software controls it). */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1187
/* Warn if the FDI RX PLL enable bit for @pipe does not match @state. */
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1200
/*
 * Warn if the panel power sequencer registers for the panel on @pipe are
 * locked (writes to panel-protected registers like the DPLL would be ignored).
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	/* DDI platforms don't use this path */
	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		/* Figure out which port (and thus pipe) the panel hangs off of. */
		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		/* Legacy (pre-PCH-split) platforms: panel must be on LVDS. */
		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* Registers are unlocked if the panel is off or the unlock key is set. */
	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1257
/*
 * Warn if the transcoder's enable state does not match @state. A powered-down
 * transcoder is treated as disabled rather than read (which would fault).
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		/* Power well off -> the transcoder cannot be enabled. */
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}
1285
1286 static void assert_plane(struct intel_plane *plane, bool state)
1287 {
1288         enum pipe pipe;
1289         bool cur_state;
1290
1291         cur_state = plane->get_hw_state(plane, &pipe);
1292
1293         I915_STATE_WARN(cur_state != state,
1294                         "%s assertion failure (expected %s, current %s)\n",
1295                         plane->base.name, onoff(state), onoff(cur_state));
1296 }
1297
1298 #define assert_plane_enabled(p) assert_plane(p, true)
1299 #define assert_plane_disabled(p) assert_plane(p, false)
1300
/* Warn if any plane on @crtc is still enabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
1309
/*
 * Warn if vblank interrupts are still enabled on @crtc; a successful
 * vblank_get (return 0) means they were, so drop the reference again.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1315
/* Warn if the PCH transcoder for @pipe is still enabled. */
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}
1328
/* Warn if the PCH DP port is enabled on @pipe (or abuses transcoder B on IBX). */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	/* IBX: a disabled port must not be left pointing at transcoder B */
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}
1346
/* Warn if the PCH HDMI port is enabled on @pipe (or abuses transcoder B on IBX). */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	/* IBX: a disabled port must not be left pointing at transcoder B */
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}
1364
/* Warn if any PCH port (DP, VGA, LVDS, HDMI/SDVO) is still enabled on @pipe. */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1389
/* Write the precomputed DPLL value and wait for the VLV PLL to lock. */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
}
1403
/* Enable the VLV DPLL (if the state asks for it) and program DPLL_MD. */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* Only turn the VCO on when the computed state wants it enabled. */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	intel_de_write(dev_priv, DPLL_MD(pipe),
		       pipe_config->dpll_hw_state.dpll_md);
	intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}
1422
1423
/* Enable the 10-bit DPIO clock, then the CHV PLL, and wait for lock. */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
}
1453
/* Enable the CHV DPLL (if requested) and program DPLL_MD, with the C0 workaround. */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		intel_de_write(dev_priv, DPLL_MD(PIPE_B),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_write(dev_priv, CBR4_VLV, 0);
		/* cache the value since DPLL_MD can't be read back for pipes B/C */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		intel_de_write(dev_priv, DPLL_MD(pipe),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_posting_read(dev_priv, DPLL_MD(pipe));
	}
}
1492
1493 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1494 {
1495         if (IS_I830(dev_priv))
1496                 return false;
1497
1498         return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1499 }
1500
/* Enable a pre-ILK DPLL, following the documented write/settle sequence. */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, reg, dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		/* Gen4+ has a separate register for the pixel multiplier. */
		intel_de_write(dev_priv, DPLL_MD(crtc->pipe),
			       crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		intel_de_write(dev_priv, reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(dev_priv, reg, dpll);
		intel_de_posting_read(dev_priv, reg);
		udelay(150); /* wait for warmup */
	}
}
1546
/* Disable a pre-ILK DPLL, leaving only VGA mode disable set. */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
1563
/*
 * Disable the per-pipe DPLL on Valleyview. The integrated reference
 * clock is kept enabled, as is the CRI clock for pipes other than A,
 * so the PHY keeps ticking. The pipe itself must already be off.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	intel_de_write(dev_priv, DPLL(pipe), val);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
1579
/*
 * Disable the per-pipe DPLL on Cherryview. Keeps the SSC reference
 * clock (and the CRI clock for pipes other than A) running, then shuts
 * off the 10 bit DCLKP feed to the display controller via DPIO.
 * The pipe itself must already be off.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	intel_de_write(dev_priv, DPLL(pipe), val);
	intel_de_posting_read(dev_priv, DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}
1605
/*
 * Wait for the PHY to report @dport as ready. The ready bits live in
 * DPLL(0) for ports B/C (port C's bits sit 4 positions above port B's)
 * and in DPIO_PHY_STATUS for port D. Only warns on timeout; there is
 * no error return.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* port C's ready bits are shifted up relative to port B's */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
		     dport->base.base.base.id, dport->base.base.name,
		     intel_de_read(dev_priv, dpll_reg) & port_mask,
		     expected_mask);
}
1638
/*
 * Enable the PCH transcoder feeding @crtc_state's pipe on ILK-style
 * PCHs. The shared PCH DPLL and both FDI sides must already be
 * running. On CPT the timing override chicken bit is set first (w/a);
 * on IBX the transcoder's frame start delay, BPC and interlace mode
 * are programmed to match the CPU pipe's PIPECONF.
 */
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		/* IBX + SDVO needs the legacy interlace mode */
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
			pipe_name(pipe));
}
1705
/*
 * Enable the single LPT PCH transcoder, which is hardwired to the
 * PIPE_A FDI link. Sets the timing override chicken bit (w/a) and the
 * frame start delay first, then copies the interlace mode from the CPU
 * transcoder's PIPECONF.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}
1737
/*
 * Disable the PCH transcoder for @pipe. FDI and the PCH ports must be
 * off already. On CPT the timing override workaround bit that was set
 * at enable time is cleared again afterwards.
 */
static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
			pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		intel_de_write(dev_priv, reg, val);
	}
}
1768
/*
 * Disable the single LPT PCH transcoder and clear the timing override
 * workaround bit that was set when it was enabled.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}
1786
1787 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1788 {
1789         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1790
1791         if (HAS_PCH_LPT(dev_priv))
1792                 return PIPE_A;
1793         else
1794                 return crtc->pipe;
1795 }
1796
1797 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1798 {
1799         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1800
1801         /*
1802          * On i965gm the hardware frame counter reads
1803          * zero when the TV encoder is enabled :(
1804          */
1805         if (IS_I965GM(dev_priv) &&
1806             (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1807                 return 0;
1808
1809         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1810                 return 0xffffffff; /* full 32 bit counter */
1811         else if (INTEL_GEN(dev_priv) >= 3)
1812                 return 0xffffff; /* only 24 bits of frame count */
1813         else
1814                 return 0; /* Gen2 doesn't have a hardware frame counter */
1815 }
1816
/*
 * Turn on vblank interrupt handling for the crtc. The max hw frame
 * counter value is programmed first so drm's vblank counter wraps at
 * the right point.
 */
void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	assert_vblank_disabled(&crtc->base);
	drm_crtc_set_max_vblank_count(&crtc->base,
				      intel_crtc_max_vblank_count(crtc_state));
	drm_crtc_vblank_on(&crtc->base);
}
1826
/* Turn off vblank interrupt handling for the crtc. */
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	drm_crtc_vblank_off(&crtc->base);
	assert_vblank_disabled(&crtc->base);
}
1834
/*
 * Enable the CPU pipe/transcoder for @new_crtc_state. Planes must be
 * off and the pipe's clock source (DSI PLL, pipe PLL, or FDI for PCH
 * encoders) must already be running. On 830 both pipes are always kept
 * enabled, so finding the pipe already on is tolerated only there.
 */
void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		WARN_ON(!IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1892
/*
 * Disable the CPU pipe/transcoder for @old_crtc_state. Planes must
 * already be off. Double wide mode is always cleared, but on 830 the
 * pipe itself is left running since both pipes stay enabled there.
 */
void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	trace_intel_pipe_disable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	intel_de_write(dev_priv, reg, val);
	/* only wait for off if we actually turned the pipe off */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1932
/* GTT tile size in bytes: 2KiB on gen2, 4KiB everywhere else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
1937
1938 static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
1939 {
1940         if (!is_ccs_modifier(fb->modifier))
1941                 return false;
1942
1943         return plane >= fb->format->num_planes / 2;
1944 }
1945
1946 static bool is_gen12_ccs_modifier(u64 modifier)
1947 {
1948         return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
1949                modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
1950
1951 }
1952
1953 static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
1954 {
1955         return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
1956 }
1957
1958 static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
1959 {
1960         if (is_ccs_modifier(fb->modifier))
1961                 return is_ccs_plane(fb, plane);
1962
1963         return plane == 1;
1964 }
1965
1966 static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
1967 {
1968         WARN_ON(!is_ccs_modifier(fb->modifier) ||
1969                 (main_plane && main_plane >= fb->format->num_planes / 2));
1970
1971         return fb->format->num_planes / 2 + main_plane;
1972 }
1973
1974 static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
1975 {
1976         WARN_ON(!is_ccs_modifier(fb->modifier) ||
1977                 ccs_plane < fb->format->num_planes / 2);
1978
1979         return ccs_plane - fb->format->num_planes / 2;
1980 }
1981
1982 /* Return either the main plane's CCS or - if not a CCS FB - UV plane */
1983 int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
1984 {
1985         if (is_ccs_modifier(fb->modifier))
1986                 return main_to_ccs_plane(fb, main_plane);
1987
1988         return 1;
1989 }
1990
1991 bool
1992 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
1993                                     uint64_t modifier)
1994 {
1995         return info->is_yuv &&
1996                info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
1997 }
1998
1999 static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
2000                                    int color_plane)
2001 {
2002         return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
2003                color_plane == 1;
2004 }
2005
/*
 * Return the tile width in bytes for the given fb color plane, based
 * on the tiling modifier, hw generation, and (for Yf) the pixel size.
 * Linear surfaces report a full GTT page; CCS planes use their fixed
 * widths (128 bytes pre-gen12, 64 bytes on gen12).
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 64;
		/* fall through */
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width depends on the pixel size */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}
2058
2059 static unsigned int
2060 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
2061 {
2062         if (is_gen12_ccs_plane(fb, color_plane))
2063                 return 1;
2064
2065         return intel_tile_size(to_i915(fb->dev)) /
2066                 intel_tile_width_bytes(fb, color_plane);
2067 }
2068
2069 /* Return the tile dimensions in pixel units */
2070 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
2071                             unsigned int *tile_width,
2072                             unsigned int *tile_height)
2073 {
2074         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
2075         unsigned int cpp = fb->format->cpp[color_plane];
2076
2077         *tile_width = tile_width_bytes / cpp;
2078         *tile_height = intel_tile_height(fb, color_plane);
2079 }
2080
2081 static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
2082                                         int color_plane)
2083 {
2084         unsigned int tile_width, tile_height;
2085
2086         intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2087
2088         return fb->pitches[color_plane] * tile_height;
2089 }
2090
/* Round @height up to a whole number of tiles for the given fb plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
2099
2100 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2101 {
2102         unsigned int size = 0;
2103         int i;
2104
2105         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2106                 size += rot_info->plane[i].width * rot_info->plane[i].height;
2107
2108         return size;
2109 }
2110
2111 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
2112 {
2113         unsigned int size = 0;
2114         int i;
2115
2116         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
2117                 size += rem_info->plane[i].width * rem_info->plane[i].height;
2118
2119         return size;
2120 }
2121
2122 static void
2123 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2124                         const struct drm_framebuffer *fb,
2125                         unsigned int rotation)
2126 {
2127         view->type = I915_GGTT_VIEW_NORMAL;
2128         if (drm_rotation_90_or_270(rotation)) {
2129                 view->type = I915_GGTT_VIEW_ROTATED;
2130                 view->rotated = to_intel_framebuffer(fb)->rot_info;
2131         }
2132 }
2133
/* GGTT alignment in bytes required for cursor surfaces per platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
2145
/*
 * GGTT alignment in bytes required for linear scanout surfaces per
 * platform (0: no requirement).
 */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
2158
/*
 * Minimum GGTT alignment in bytes for scanning out the given fb color
 * plane, depending on the modifier, plane role and hw generation.
 * May return 0 when no extra alignment is required.
 */
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
					 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
	    is_ccs_plane(fb, color_plane))
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_GEN(dev_priv) >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		/* semiplanar UV planes are aligned to a full tile row */
		if (is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		/* Fall-through */
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
		return 16 * 1024;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
		if (INTEL_GEN(dev_priv) >= 12 &&
		    is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		/* Fall-through */
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}
2196
2197 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2198 {
2199         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2200         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2201
2202         return INTEL_GEN(dev_priv) < 4 ||
2203                 (plane->has_fbc &&
2204                  plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2205 }
2206
/*
 * Pin @fb's backing object into the GGTT for scanout through @view
 * and, when @uses_fence and the vma is map-and-fenceable, attach a
 * fence register for tiled scanout. On success returns the vma with
 * an extra reference held and may set PLANE_HAS_FENCE in @out_flags;
 * on failure returns an ERR_PTR. Undo with intel_unpin_fb_vma().
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	/* main surface alignment; must be a power of 2 for the pin below */
	alignment = intel_surf_alignment(fb, 0);
	if (WARN_ON(alignment && !is_power_of_2(alignment)))
		return ERR_PTR(-EINVAL);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/*
	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	pinctl = 0;
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/*
		 * Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* pre-gen4 scanout requires the fence: bail out */
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}
2301
/*
 * Release the fence (if PLANE_HAS_FENCE is set in @flags), the display
 * plane pin, and the reference taken by intel_pin_and_fence_fb_obj().
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	i915_gem_object_lock(vma->obj);
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_gem_object_unlock(vma->obj);

	i915_vma_put(vma);
}
2312
2313 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2314                           unsigned int rotation)
2315 {
2316         if (drm_rotation_90_or_270(rotation))
2317                 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2318         else
2319                 return fb->pitches[color_plane];
2320 }
2321
2322 /*
2323  * Convert the x/y offsets into a linear offset.
2324  * Only valid with 0/180 degree rotation, which is fine since linear
2325  * offset is only used with linear buffers on pre-hsw and tiled buffers
2326  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2327  */
2328 u32 intel_fb_xy_to_linear(int x, int y,
2329                           const struct intel_plane_state *state,
2330                           int color_plane)
2331 {
2332         const struct drm_framebuffer *fb = state->hw.fb;
2333         unsigned int cpp = fb->format->cpp[color_plane];
2334         unsigned int pitch = state->color_plane[color_plane].stride;
2335
2336         return y * pitch + x * cpp;
2337 }
2338
2339 /*
2340  * Add the x/y offsets derived from fb->offsets[] to the user
2341  * specified plane src x/y offsets. The resulting x/y offsets
2342  * specify the start of scanout from the beginning of the gtt mapping.
2343  */
2344 void intel_add_fb_offsets(int *x, int *y,
2345                           const struct intel_plane_state *state,
2346                           int color_plane)
2347
2348 {
2349         *x += state->color_plane[color_plane].x;
2350         *y += state->color_plane[color_plane].y;
2351 }
2352
2353 static u32 intel_adjust_tile_offset(int *x, int *y,
2354                                     unsigned int tile_width,
2355                                     unsigned int tile_height,
2356                                     unsigned int tile_size,
2357                                     unsigned int pitch_tiles,
2358                                     u32 old_offset,
2359                                     u32 new_offset)
2360 {
2361         unsigned int pitch_pixels = pitch_tiles * tile_width;
2362         unsigned int tiles;
2363
2364         WARN_ON(old_offset & (tile_size - 1));
2365         WARN_ON(new_offset & (tile_size - 1));
2366         WARN_ON(new_offset > old_offset);
2367
2368         tiles = (old_offset - new_offset) / tile_size;
2369
2370         *y += tiles / pitch_tiles * tile_height;
2371         *x += tiles % pitch_tiles * tile_width;
2372
2373         /* minimize x in case it got needlessly big */
2374         *y += *x / pitch_pixels * tile_height;
2375         *x %= pitch_pixels;
2376
2377         return new_offset;
2378 }
2379
2380 static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
2381 {
2382         return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
2383                is_gen12_ccs_plane(fb, color_plane);
2384 }
2385
/*
 * Compensate the x/y offsets for moving the surface base from
 * @old_offset to @new_offset (both in bytes, new_offset <= old_offset).
 * Tiled surfaces distribute the delta in whole tiles via
 * intel_adjust_tile_offset(); linear surfaces redistribute it over
 * rows/pixels directly. Returns @new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	WARN_ON(new_offset > old_offset);

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/*
			 * For 90/270 the caller passes the rotated pitch,
			 * so the pitch in tiles is measured against
			 * tile_height, and tile dims are swapped to match
			 * the rotated GTT view.
			 */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		/* linear: fold the old offset into x/y, then re-split */
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
2424
2425 /*
2426  * Adjust the tile offset by moving the difference into
2427  * the x/y offsets.
2428  */
2429 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2430                                              const struct intel_plane_state *state,
2431                                              int color_plane,
2432                                              u32 old_offset, u32 new_offset)
2433 {
2434         return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
2435                                            state->hw.rotation,
2436                                            state->color_plane[color_plane].stride,
2437                                            old_offset, new_offset);
2438 }
2439
2440 /*
2441  * Computes the aligned offset to the base tile and adjusts
2442  * x, y. bytes per pixel is assumed to be a power-of-two.
2443  *
2444  * In the 90/270 rotated case, x and y are assumed
2445  * to be already rotated to match the rotated GTT view, and
2446  * pitch is the tile_height aligned framebuffer height.
2447  *
2448  * This function is used when computing the derived information
2449  * under intel_framebuffer, so using any of that information
2450  * here is not allowed. Anything under drm_framebuffer can be
2451  * used. This is why the user has to pass in the pitch since it
2452  * is specified in the rotated orientation.
2453  */
2454 static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2455                                         int *x, int *y,
2456                                         const struct drm_framebuffer *fb,
2457                                         int color_plane,
2458                                         unsigned int pitch,
2459                                         unsigned int rotation,
2460                                         u32 alignment)
2461 {
2462         unsigned int cpp = fb->format->cpp[color_plane];
2463         u32 offset, offset_aligned;
2464
2465         if (!is_surface_linear(fb, color_plane)) {
2466                 unsigned int tile_size, tile_width, tile_height;
2467                 unsigned int tile_rows, tiles, pitch_tiles;
2468
2469                 tile_size = intel_tile_size(dev_priv);
2470                 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2471
2472                 if (drm_rotation_90_or_270(rotation)) {
2473                         pitch_tiles = pitch / tile_height;
2474                         swap(tile_width, tile_height);
2475                 } else {
2476                         pitch_tiles = pitch / (tile_width * cpp);
2477                 }
2478
2479                 tile_rows = *y / tile_height;
2480                 *y %= tile_height;
2481
2482                 tiles = *x / tile_width;
2483                 *x %= tile_width;
2484
2485                 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2486
2487                 offset_aligned = offset;
2488                 if (alignment)
2489                         offset_aligned = rounddown(offset_aligned, alignment);
2490
2491                 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2492                                          tile_size, pitch_tiles,
2493                                          offset, offset_aligned);
2494         } else {
2495                 offset = *y * pitch + *x * cpp;
2496                 offset_aligned = offset;
2497                 if (alignment) {
2498                         offset_aligned = rounddown(offset_aligned, alignment);
2499                         *y = (offset % alignment) / pitch;
2500                         *x = ((offset % alignment) - *y * pitch) / cpp;
2501                 } else {
2502                         *y = *x = 0;
2503                 }
2504         }
2505
2506         return offset_aligned;
2507 }
2508
2509 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2510                                               const struct intel_plane_state *state,
2511                                               int color_plane)
2512 {
2513         struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
2514         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2515         const struct drm_framebuffer *fb = state->hw.fb;
2516         unsigned int rotation = state->hw.rotation;
2517         int pitch = state->color_plane[color_plane].stride;
2518         u32 alignment;
2519
2520         if (intel_plane->id == PLANE_CURSOR)
2521                 alignment = intel_cursor_alignment(dev_priv);
2522         else
2523                 alignment = intel_surf_alignment(fb, color_plane);
2524
2525         return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2526                                             pitch, rotation, alignment);
2527 }
2528
/*
 * Convert the fb->offset[] into x/y offsets from the start of the
 * GTT mapping. Validates alignment of the byte offset and checks for
 * offset+size overflow before converting. Returns 0 on success,
 * -EINVAL on a misaligned offset, -ERANGE on overflow.
 */
static int intel_fb_offset_to_xy(int *x, int *y,
				 const struct drm_framebuffer *fb,
				 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int height;
	u32 alignment;

	/* pick the required byte alignment for fb->offsets[color_plane] */
	if (INTEL_GEN(dev_priv) >= 12 &&
	    is_semiplanar_uv_plane(fb, color_plane))
		alignment = intel_tile_row_size(fb, color_plane);
	else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
		alignment = intel_tile_size(dev_priv);
	else
		alignment = 0; /* linear has no alignment requirement */

	if (alignment != 0 && fb->offsets[color_plane] % alignment) {
		drm_dbg_kms(&dev_priv->drm,
			    "Misaligned offset 0x%08x for color plane %d\n",
			    fb->offsets[color_plane], color_plane);
		return -EINVAL;
	}

	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
	height = ALIGN(height, intel_tile_height(fb, color_plane));

	/* Catch potential overflows early */
	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
			    fb->offsets[color_plane])) {
		drm_dbg_kms(&dev_priv->drm,
			    "Bad offset 0x%08x or pitch %d for color plane %d\n",
			    fb->offsets[color_plane], fb->pitches[color_plane],
			    color_plane);
		return -ERANGE;
	}

	*x = 0;
	*y = 0;

	/* fold the validated byte offset into the x/y offsets */
	intel_adjust_aligned_offset(x, y,
				    fb, color_plane, DRM_MODE_ROTATE_0,
				    fb->pitches[color_plane],
				    fb->offsets[color_plane], 0);

	return 0;
}
2576
2577 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2578 {
2579         switch (fb_modifier) {
2580         case I915_FORMAT_MOD_X_TILED:
2581                 return I915_TILING_X;
2582         case I915_FORMAT_MOD_Y_TILED:
2583         case I915_FORMAT_MOD_Y_TILED_CCS:
2584         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2585         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
2586                 return I915_TILING_Y;
2587         default:
2588                 return I915_TILING_NONE;
2589         }
2590 }
2591
2592 /*
2593  * From the Sky Lake PRM:
2594  * "The Color Control Surface (CCS) contains the compression status of
2595  *  the cache-line pairs. The compression state of the cache-line pair
2596  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
2597  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2598  *  cache-line-pairs. CCS is always Y tiled."
2599  *
2600  * Since cache line pairs refers to horizontally adjacent cache lines,
2601  * each cache line in the CCS corresponds to an area of 32x16 cache
2602  * lines on the main surface. Since each pixel is 4 bytes, this gives
2603  * us a ratio of one byte in the CCS for each 8x16 pixels in the
2604  * main surface.
2605  */
2606 static const struct drm_format_info skl_ccs_formats[] = {
2607         { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
2608           .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2609         { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
2610           .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2611         { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
2612           .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
2613         { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
2614           .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
2615 };
2616
2617 /*
2618  * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
2619  * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
2620  * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
2621  * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
2622  * the main surface.
2623  */
2624 static const struct drm_format_info gen12_ccs_formats[] = {
2625         { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
2626           .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2627           .hsub = 1, .vsub = 1, },
2628         { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
2629           .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2630           .hsub = 1, .vsub = 1, },
2631         { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
2632           .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2633           .hsub = 1, .vsub = 1, .has_alpha = true },
2634         { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
2635           .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2636           .hsub = 1, .vsub = 1, .has_alpha = true },
2637         { .format = DRM_FORMAT_YUYV, .num_planes = 2,
2638           .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2639           .hsub = 2, .vsub = 1, .is_yuv = true },
2640         { .format = DRM_FORMAT_YVYU, .num_planes = 2,
2641           .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2642           .hsub = 2, .vsub = 1, .is_yuv = true },
2643         { .format = DRM_FORMAT_UYVY, .num_planes = 2,
2644           .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2645           .hsub = 2, .vsub = 1, .is_yuv = true },
2646         { .format = DRM_FORMAT_VYUY, .num_planes = 2,
2647           .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
2648           .hsub = 2, .vsub = 1, .is_yuv = true },
2649         { .format = DRM_FORMAT_NV12, .num_planes = 4,
2650           .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
2651           .hsub = 2, .vsub = 2, .is_yuv = true },
2652         { .format = DRM_FORMAT_P010, .num_planes = 4,
2653           .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
2654           .hsub = 2, .vsub = 2, .is_yuv = true },
2655         { .format = DRM_FORMAT_P012, .num_planes = 4,
2656           .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
2657           .hsub = 2, .vsub = 2, .is_yuv = true },
2658         { .format = DRM_FORMAT_P016, .num_planes = 4,
2659           .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
2660           .hsub = 2, .vsub = 2, .is_yuv = true },
2661 };
2662
2663 static const struct drm_format_info *
2664 lookup_format_info(const struct drm_format_info formats[],
2665                    int num_formats, u32 format)
2666 {
2667         int i;
2668
2669         for (i = 0; i < num_formats; i++) {
2670                 if (formats[i].format == format)
2671                         return &formats[i];
2672         }
2673
2674         return NULL;
2675 }
2676
2677 static const struct drm_format_info *
2678 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2679 {
2680         switch (cmd->modifier[0]) {
2681         case I915_FORMAT_MOD_Y_TILED_CCS:
2682         case I915_FORMAT_MOD_Yf_TILED_CCS:
2683                 return lookup_format_info(skl_ccs_formats,
2684                                           ARRAY_SIZE(skl_ccs_formats),
2685                                           cmd->pixel_format);
2686         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2687         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
2688                 return lookup_format_info(gen12_ccs_formats,
2689                                           ARRAY_SIZE(gen12_ccs_formats),
2690                                           cmd->pixel_format);
2691         default:
2692                 return NULL;
2693         }
2694 }
2695
2696 bool is_ccs_modifier(u64 modifier)
2697 {
2698         return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
2699                modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
2700                modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2701                modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2702 }
2703
2704 static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
2705 {
2706         return DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)],
2707                             512) * 64;
2708 }
2709
2710 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2711                               u32 pixel_format, u64 modifier)
2712 {
2713         struct intel_crtc *crtc;
2714         struct intel_plane *plane;
2715
2716         /*
2717          * We assume the primary plane for pipe A has
2718          * the highest stride limits of them all.
2719          */
2720         crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
2721         if (!crtc)
2722                 return 0;
2723
2724         plane = to_intel_plane(crtc->base.primary);
2725
2726         return plane->max_stride(plane, pixel_format, modifier,
2727                                  DRM_MODE_ROTATE_0);
2728 }
2729
2730 static
2731 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2732                         u32 pixel_format, u64 modifier)
2733 {
2734         /*
2735          * Arbitrary limit for gen4+ chosen to match the
2736          * render engine max stride.
2737          *
2738          * The new CCS hash mode makes remapping impossible
2739          */
2740         if (!is_ccs_modifier(modifier)) {
2741                 if (INTEL_GEN(dev_priv) >= 7)
2742                         return 256*1024;
2743                 else if (INTEL_GEN(dev_priv) >= 4)
2744                         return 128*1024;
2745         }
2746
2747         return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2748 }
2749
/*
 * Required stride alignment (in bytes) for @color_plane of @fb.
 * Linear surfaces generally need 64 byte alignment, bumped to a full
 * page when the pitch exceeds the plane limit (to allow remapping);
 * tiled surfaces align to the tile width, with CCS-specific padding.
 */
static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	u32 tile_width;

	if (is_surface_linear(fb, color_plane)) {
		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
							   fb->format->format,
							   fb->modifier);

		/*
		 * To make remapping with linear generally feasible
		 * we need the stride to be page aligned.
		 */
		if (fb->pitches[color_plane] > max_stride &&
		    !is_ccs_modifier(fb->modifier))
			return intel_tile_size(dev_priv);
		else
			return 64;
	}

	tile_width = intel_tile_width_bytes(fb, color_plane);
	if (is_ccs_modifier(fb->modifier)) {
		/*
		 * Display WA #0531: skl,bxt,kbl,glk
		 *
		 * Render decompression and plane width > 3840
		 * combined with horizontal panning requires the
		 * plane stride to be a multiple of 4. We'll just
		 * require the entire fb to accommodate that to avoid
		 * potential runtime errors at plane configuration time.
		 */
		if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
			tile_width *= 4;
		/*
		 * The main surface pitch must be padded to a multiple of four
		 * tile widths.
		 */
		else if (INTEL_GEN(dev_priv) >= 12)
			tile_width *= 4;
	}
	return tile_width;
}
2794
2795 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2796 {
2797         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2798         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2799         const struct drm_framebuffer *fb = plane_state->hw.fb;
2800         int i;
2801
2802         /* We don't want to deal with remapping with cursors */
2803         if (plane->id == PLANE_CURSOR)
2804                 return false;
2805
2806         /*
2807          * The display engine limits already match/exceed the
2808          * render engine limits, so not much point in remapping.
2809          * Would also need to deal with the fence POT alignment
2810          * and gen2 2KiB GTT tile size.
2811          */
2812         if (INTEL_GEN(dev_priv) < 4)
2813                 return false;
2814
2815         /*
2816          * The new CCS hash mode isn't compatible with remapping as
2817          * the virtual address of the pages affects the compressed data.
2818          */
2819         if (is_ccs_modifier(fb->modifier))
2820                 return false;
2821
2822         /* Linear needs a page aligned stride for remapping */
2823         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2824                 unsigned int alignment = intel_tile_size(dev_priv) - 1;
2825
2826                 for (i = 0; i < fb->format->num_planes; i++) {
2827                         if (fb->pitches[i] & alignment)
2828                                 return false;
2829                 }
2830         }
2831
2832         return true;
2833 }
2834
2835 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2836 {
2837         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2838         const struct drm_framebuffer *fb = plane_state->hw.fb;
2839         unsigned int rotation = plane_state->hw.rotation;
2840         u32 stride, max_stride;
2841
2842         /*
2843          * No remapping for invisible planes since we don't have
2844          * an actual source viewport to remap.
2845          */
2846         if (!plane_state->uapi.visible)
2847                 return false;
2848
2849         if (!intel_plane_can_remap(plane_state))
2850                 return false;
2851
2852         /*
2853          * FIXME: aux plane limits on gen9+ are
2854          * unclear in Bspec, for now no checking.
2855          */
2856         stride = intel_fb_pitch(fb, 0, rotation);
2857         max_stride = plane->max_stride(plane, fb->format->format,
2858                                        fb->modifier, rotation);
2859
2860         return stride > max_stride;
2861 }
2862
/*
 * Compute the horizontal/vertical subsampling factors of @color_plane
 * relative to its main plane. Plane 0 is never subsampled; non-CCS AUX
 * planes use the format's hsub/vsub; gen12 CCS planes derive hsub from
 * the block widths and use a fixed vsub of 32.
 */
static void
intel_fb_plane_get_subsampling(int *hsub, int *vsub,
			       const struct drm_framebuffer *fb,
			       int color_plane)
{
	int main_plane;

	if (color_plane == 0) {
		*hsub = 1;
		*vsub = 1;

		return;
	}

	/*
	 * TODO: Deduct the subsampling from the char block for all CCS
	 * formats and planes.
	 */
	if (!is_gen12_ccs_plane(fb, color_plane)) {
		*hsub = fb->format->hsub;
		*vsub = fb->format->vsub;

		return;
	}

	main_plane = ccs_to_main_plane(fb, color_plane);
	*hsub = drm_format_info_block_width(fb->format, color_plane) /
		drm_format_info_block_width(fb->format, main_plane);

	/*
	 * The min stride check in the core framebuffer_check() function
	 * assumes that format->hsub applies to every plane except for the
	 * first plane. That's incorrect for the CCS AUX plane of the first
	 * plane, but for the above check to pass we must define the block
	 * width with that subsampling applied to it. Adjust the width here
	 * accordingly, so we can calculate the actual subsampling factor.
	 */
	if (main_plane == 0)
		*hsub *= fb->format->hsub;

	/* gen12 CCS: one AUX line covers 32 main surface lines */
	*vsub = 32;
}
/*
 * Validate that the intra-tile x/y offsets of a CCS plane match those
 * of its main plane. CCS has no x/y offset register of its own, so a
 * mismatch cannot be programmed. Returns 0 if @ccs_plane isn't a CCS
 * plane or the offsets match, -EINVAL otherwise.
 */
static int
intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	int main_plane;
	int hsub, vsub;
	int tile_width, tile_height;
	int ccs_x, ccs_y;
	int main_x, main_y;

	if (!is_ccs_plane(fb, ccs_plane))
		return 0;

	intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

	/* scale the CCS tile up to main surface units */
	tile_width *= hsub;
	tile_height *= vsub;

	/* CCS x/y expressed in main surface units, within one tile */
	ccs_x = (x * hsub) % tile_width;
	ccs_y = (y * vsub) % tile_height;

	main_plane = ccs_to_main_plane(fb, ccs_plane);
	main_x = intel_fb->normal[main_plane].x % tile_width;
	main_y = intel_fb->normal[main_plane].y % tile_height;

	/*
	 * CCS doesn't have its own x/y offset register, so the intra CCS tile
	 * x/y offsets must match between CCS and the main surface.
	 */
	if (main_x != ccs_x || main_y != ccs_y) {
		DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
			      main_x, main_y,
			      ccs_x, ccs_y,
			      intel_fb->normal[main_plane].x,
			      intel_fb->normal[main_plane].y,
			      x, y);
		return -EINVAL;
	}

	return 0;
}
2947
2948 static void
2949 intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
2950 {
2951         int main_plane = is_ccs_plane(fb, color_plane) ?
2952                          ccs_to_main_plane(fb, color_plane) : 0;
2953         int main_hsub, main_vsub;
2954         int hsub, vsub;
2955
2956         intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
2957         intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
2958         *w = fb->width / main_hsub / hsub;
2959         *h = fb->height / main_vsub / vsub;
2960 }
2961
2962 /*
2963  * Setup the rotated view for an FB plane and return the size the GTT mapping
2964  * requires for this view.
2965  */
2966 static u32
2967 setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
2968                   u32 gtt_offset_rotated, int x, int y,
2969                   unsigned int width, unsigned int height,
2970                   unsigned int tile_size,
2971                   unsigned int tile_width, unsigned int tile_height,
2972                   struct drm_framebuffer *fb)
2973 {
2974         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2975         struct intel_rotation_info *rot_info = &intel_fb->rot_info;
2976         unsigned int pitch_tiles;
2977         struct drm_rect r;
2978
2979         /* Y or Yf modifiers required for 90/270 rotation */
2980         if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
2981             fb->modifier != I915_FORMAT_MOD_Yf_TILED)
2982                 return 0;
2983
2984         if (WARN_ON(plane >= ARRAY_SIZE(rot_info->plane)))
2985                 return 0;
2986
2987         rot_info->plane[plane] = *plane_info;
2988
2989         intel_fb->rotated[plane].pitch = plane_info->height * tile_height;
2990
2991         /* rotate the x/y offsets to match the GTT view */
2992         drm_rect_init(&r, x, y, width, height);
2993         drm_rect_rotate(&r,
2994                         plane_info->width * tile_width,
2995                         plane_info->height * tile_height,
2996                         DRM_MODE_ROTATE_270);
2997         x = r.x1;
2998         y = r.y1;
2999
3000         /* rotate the tile dimensions to match the GTT view */
3001         pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
3002         swap(tile_width, tile_height);
3003
3004         /*
3005          * We only keep the x/y offsets, so push all of the
3006          * gtt offset into the x/y offsets.
3007          */
3008         intel_adjust_tile_offset(&x, &y,
3009                                  tile_width, tile_height,
3010                                  tile_size, pitch_tiles,
3011                                  gtt_offset_rotated * tile_size, 0);
3012
3013         /*
3014          * First pixel of the framebuffer from
3015          * the start of the rotated gtt mapping.
3016          */
3017         intel_fb->rotated[plane].x = x;
3018         intel_fb->rotated[plane].y = y;
3019
3020         return plane_info->width * plane_info->height;
3021 }
3022
3023 static int
3024 intel_fill_fb_info(struct drm_i915_private *dev_priv,
3025                    struct drm_framebuffer *fb)
3026 {
3027         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
3028         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
3029         u32 gtt_offset_rotated = 0;
3030         unsigned int max_size = 0;
3031         int i, num_planes = fb->format->num_planes;
3032         unsigned int tile_size = intel_tile_size(dev_priv);
3033
3034         for (i = 0; i < num_planes; i++) {
3035                 unsigned int width, height;
3036                 unsigned int cpp, size;
3037                 u32 offset;
3038                 int x, y;
3039                 int ret;
3040
3041                 cpp = fb->format->cpp[i];
3042                 intel_fb_plane_dims(&width, &height, fb, i);
3043
3044                 ret = intel_fb_offset_to_xy(&x, &y, fb, i);
3045                 if (ret) {
3046                         drm_dbg_kms(&dev_priv->drm,
3047                                     "bad fb plane %d offset: 0x%x\n",
3048                                     i, fb->offsets[i]);
3049                         return ret;
3050                 }
3051
3052                 ret = intel_fb_check_ccs_xy(fb, i, x, y);
3053                 if (ret)
3054                         return ret;
3055
3056                 /*
3057                  * The fence (if used) is aligned to the start of the object
3058                  * so having the framebuffer wrap around across the edge of the
3059                  * fenced region doesn't really work. We have no API to configure
3060                  * the fence start offset within the object (nor could we probably
3061                  * on gen2/3). So it's just easier if we just require that the
3062                  * fb layout agrees with the fence layout. We already check that the
3063                  * fb stride matches the fence stride elsewhere.
3064                  */
3065                 if (i == 0 && i915_gem_object_is_tiled(obj) &&
3066                     (x + width) * cpp > fb->pitches[i]) {
3067                         drm_dbg_kms(&dev_priv->drm,
3068                                     "bad fb plane %d offset: 0x%x\n",
3069                                      i, fb->offsets[i]);
3070                         return -EINVAL;
3071                 }
3072
3073                 /*
3074                  * First pixel of the framebuffer from
3075                  * the start of the normal gtt mapping.
3076                  */
3077                 intel_fb->normal[i].x = x;
3078                 intel_fb->normal[i].y = y;
3079
3080                 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
3081                                                       fb->pitches[i],
3082                                                       DRM_MODE_ROTATE_0,
3083                                                       tile_size);
3084                 offset /= tile_size;
3085
3086                 if (!is_surface_linear(fb, i)) {
3087                         struct intel_remapped_plane_info plane_info;
3088                         unsigned int tile_width, tile_height;
3089
3090                         intel_tile_dims(fb, i, &tile_width, &tile_height);
3091
3092                         plane_info.offset = offset;
3093                         plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
3094                                                          tile_width * cpp);
3095                         plane_info.width = DIV_ROUND_UP(x + width, tile_width);
3096                         plane_info.height = DIV_ROUND_UP(y + height,
3097                                                          tile_height);
3098
3099                         /* how many tiles does this plane need */
3100                         size = plane_info.stride * plane_info.height;
3101                         /*
3102                          * If the plane isn't horizontally tile aligned,
3103                          * we need one more tile.
3104                          */
3105                         if (x != 0)
3106                                 size++;
3107
3108                         gtt_offset_rotated +=
3109                                 setup_fb_rotation(i, &plane_info,
3110                                                   gtt_offset_rotated,
3111                                                   x, y, width, height,
3112                                                   tile_size,
3113                                                   tile_width, tile_height,
3114                                                   fb);
3115                 } else {
3116                         size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
3117                                             x * cpp, tile_size);
3118                 }
3119
3120                 /* how many tiles in total needed in the bo */
3121                 max_size = max(max_size, offset + size);
3122         }
3123
3124         if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
3125                 drm_dbg_kms(&dev_priv->drm,
3126                             "fb too big for bo (need %llu bytes, have %zu bytes)\n",
3127                             mul_u32_u32(max_size, tile_size), obj->base.size);
3128                 return -EINVAL;
3129         }
3130
3131         return 0;
3132 }
3133
/*
 * Set up a rotated/remapped GGTT view for the plane and compute the
 * per-color-plane offsets, strides and x/y coordinates addressing into
 * that view.
 *
 * The view contains only the tile rows covered by the source viewport,
 * so the uapi src rectangle is first made viewport-relative (and rotated
 * for 90/270 rotation) to match the packed layout.
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *info = &plane_state->view.rotated;
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);
	unsigned int src_x, src_y;
	unsigned int src_w, src_h;
	u32 gtt_offset = 0; /* running tile offset into the remapped view */

	memset(&plane_state->view, 0, sizeof(plane_state->view));
	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

	/* src coordinates are in 16.16 fixed point */
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

	/* CCS modifiers are not expected to take this path */
	WARN_ON(is_ccs_modifier(fb->modifier));

	/* Make src coordinates relative to the viewport */
	drm_rect_translate(&plane_state->uapi.src,
			   -(src_x << 16), -(src_y << 16));

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				src_w << 16, src_h << 16,
				DRM_MODE_ROTATE_270);

	for (i = 0; i < num_planes; i++) {
		/* chroma subsampling applies only to the non-0 color planes */
		unsigned int hsub = i ? fb->format->hsub : 1;
		unsigned int vsub = i ? fb->format->vsub : 1;
		unsigned int cpp = fb->format->cpp[i];
		unsigned int tile_width, tile_height;
		unsigned int width, height;
		unsigned int pitch_tiles;
		unsigned int x, y;
		u32 offset;

		intel_tile_dims(fb, i, &tile_width, &tile_height);

		x = src_x / hsub;
		y = src_y / vsub;
		width = src_w / hsub;
		height = src_h / vsub;

		/*
		 * First pixel of the src viewport from the
		 * start of the normal gtt mapping.
		 */
		x += intel_fb->normal[i].x;
		y += intel_fb->normal[i].y;

		/* tile-aligned offset of the viewport in the unrotated fb */
		offset = intel_compute_aligned_offset(dev_priv, &x, &y,
						      fb, i, fb->pitches[i],
						      DRM_MODE_ROTATE_0, tile_size);
		offset /= tile_size;

		WARN_ON(i >= ARRAY_SIZE(info->plane));
		info->plane[i].offset = offset;
		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
						     tile_width * cpp);
		info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
		info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			struct drm_rect r;

			/* rotate the x/y offsets to match the GTT view */
			drm_rect_init(&r, x, y, width, height);
			drm_rect_rotate(&r,
					info->plane[i].width * tile_width,
					info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			/* in the rotated view the tile pitch is in units of tile rows */
			pitch_tiles = info->plane[i].height;
			plane_state->color_plane[i].stride = pitch_tiles * tile_height;

			/* rotate the tile dimensions to match the GTT view */
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = info->plane[i].width;
			plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
		}

		/*
		 * We only keep the x/y offsets, so push all of the
		 * gtt offset into the x/y offsets.
		 */
		intel_adjust_tile_offset(&x, &y,
					 tile_width, tile_height,
					 tile_size, pitch_tiles,
					 gtt_offset * tile_size, 0);

		/* this plane consumes width*height tiles of the view */
		gtt_offset += info->plane[i].width * info->plane[i].height;

		plane_state->color_plane[i].offset = 0;
		plane_state->color_plane[i].x = x;
		plane_state->color_plane[i].y = y;
	}
}
3244
3245 static int
3246 intel_plane_compute_gtt(struct intel_plane_state *plane_state)
3247 {
3248         const struct intel_framebuffer *fb =
3249                 to_intel_framebuffer(plane_state->hw.fb);
3250         unsigned int rotation = plane_state->hw.rotation;
3251         int i, num_planes;
3252
3253         if (!fb)
3254                 return 0;
3255
3256         num_planes = fb->base.format->num_planes;
3257
3258         if (intel_plane_needs_remap(plane_state)) {
3259                 intel_plane_remap_gtt(plane_state);
3260
3261                 /*
3262                  * Sometimes even remapping can't overcome
3263                  * the stride limitations :( Can happen with
3264                  * big plane sizes and suitably misaligned
3265                  * offsets.
3266                  */
3267                 return intel_plane_check_stride(plane_state);
3268         }
3269
3270         intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
3271
3272         for (i = 0; i < num_planes; i++) {
3273                 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
3274                 plane_state->color_plane[i].offset = 0;
3275
3276                 if (drm_rotation_90_or_270(rotation)) {
3277                         plane_state->color_plane[i].x = fb->rotated[i].x;
3278                         plane_state->color_plane[i].y = fb->rotated[i].y;
3279                 } else {
3280                         plane_state->color_plane[i].x = fb->normal[i].x;
3281                         plane_state->color_plane[i].y = fb->normal[i].y;
3282                 }
3283         }
3284
3285         /* Rotate src coordinates to match rotated GTT view */
3286         if (drm_rotation_90_or_270(rotation))
3287                 drm_rect_rotate(&plane_state->uapi.src,
3288                                 fb->base.width << 16, fb->base.height << 16,
3289                                 DRM_MODE_ROTATE_270);
3290
3291         return intel_plane_check_stride(plane_state);
3292 }
3293
3294 static int i9xx_format_to_fourcc(int format)
3295 {
3296         switch (format) {
3297         case DISPPLANE_8BPP:
3298                 return DRM_FORMAT_C8;
3299         case DISPPLANE_BGRA555:
3300                 return DRM_FORMAT_ARGB1555;
3301         case DISPPLANE_BGRX555:
3302                 return DRM_FORMAT_XRGB1555;
3303         case DISPPLANE_BGRX565:
3304                 return DRM_FORMAT_RGB565;
3305         default:
3306         case DISPPLANE_BGRX888:
3307                 return DRM_FORMAT_XRGB8888;
3308         case DISPPLANE_RGBX888:
3309                 return DRM_FORMAT_XBGR8888;
3310         case DISPPLANE_BGRA888:
3311                 return DRM_FORMAT_ARGB8888;
3312         case DISPPLANE_RGBA888:
3313                 return DRM_FORMAT_ABGR8888;
3314         case DISPPLANE_BGRX101010:
3315                 return DRM_FORMAT_XRGB2101010;
3316         case DISPPLANE_RGBX101010:
3317                 return DRM_FORMAT_XBGR2101010;
3318         case DISPPLANE_BGRA101010:
3319                 return DRM_FORMAT_ARGB2101010;
3320         case DISPPLANE_RGBA101010:
3321                 return DRM_FORMAT_ABGR2101010;
3322         case DISPPLANE_RGBX161616:
3323                 return DRM_FORMAT_XBGR16161616F;
3324         }
3325 }
3326
3327 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
3328 {
3329         switch (format) {
3330         case PLANE_CTL_FORMAT_RGB_565:
3331                 return DRM_FORMAT_RGB565;
3332         case PLANE_CTL_FORMAT_NV12:
3333                 return DRM_FORMAT_NV12;
3334         case PLANE_CTL_FORMAT_P010:
3335                 return DRM_FORMAT_P010;
3336         case PLANE_CTL_FORMAT_P012:
3337                 return DRM_FORMAT_P012;
3338         case PLANE_CTL_FORMAT_P016:
3339                 return DRM_FORMAT_P016;
3340         case PLANE_CTL_FORMAT_Y210:
3341                 return DRM_FORMAT_Y210;
3342         case PLANE_CTL_FORMAT_Y212:
3343                 return DRM_FORMAT_Y212;
3344         case PLANE_CTL_FORMAT_Y216:
3345                 return DRM_FORMAT_Y216;
3346         case PLANE_CTL_FORMAT_Y410:
3347                 return DRM_FORMAT_XVYU2101010;
3348         case PLANE_CTL_FORMAT_Y412:
3349                 return DRM_FORMAT_XVYU12_16161616;
3350         case PLANE_CTL_FORMAT_Y416:
3351                 return DRM_FORMAT_XVYU16161616;
3352         default:
3353         case PLANE_CTL_FORMAT_XRGB_8888:
3354                 if (rgb_order) {
3355                         if (alpha)
3356                                 return DRM_FORMAT_ABGR8888;
3357                         else
3358                                 return DRM_FORMAT_XBGR8888;
3359                 } else {
3360                         if (alpha)
3361                                 return DRM_FORMAT_ARGB8888;
3362                         else
3363                                 return DRM_FORMAT_XRGB8888;
3364                 }
3365         case PLANE_CTL_FORMAT_XRGB_2101010:
3366                 if (rgb_order) {
3367                         if (alpha)
3368                                 return DRM_FORMAT_ABGR2101010;
3369                         else
3370                                 return DRM_FORMAT_XBGR2101010;
3371                 } else {
3372                         if (alpha)
3373                                 return DRM_FORMAT_ARGB2101010;
3374                         else
3375                                 return DRM_FORMAT_XRGB2101010;
3376                 }
3377         case PLANE_CTL_FORMAT_XRGB_16161616F:
3378                 if (rgb_order) {
3379                         if (alpha)
3380                                 return DRM_FORMAT_ABGR16161616F;
3381                         else
3382                                 return DRM_FORMAT_XBGR16161616F;
3383                 } else {
3384                         if (alpha)
3385                                 return DRM_FORMAT_ARGB16161616F;
3386                         else
3387                                 return DRM_FORMAT_XRGB16161616F;
3388                 }
3389         }
3390 }
3391
/*
 * Try to wrap the firmware-programmed (BIOS/GOP) framebuffer described by
 * @plane_config in a preallocated stolen-memory GEM object and initialize
 * an intel_framebuffer around it.
 *
 * Returns true on success; false means the firmware fb can't be reused
 * and the caller should try sharing another crtc's fb or disable the
 * plane instead.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	/* the fb may not be page aligned in stolen; round outwards */
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);
	struct drm_i915_gem_object *obj;
	bool ret = false;

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > dev_priv->stolen_usable_size)
		return false;

	/* only modifiers the takeover path understands are accepted */
	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
		break;
	default:
		drm_dbg(&dev_priv->drm,
			"Unsupported modifier for initial FB: 0x%llx\n",
			fb->modifier);
		return false;
	}

	obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	if (IS_ERR(obj))
		return false;

	/* mirror the firmware's tiling setup into the new object */
	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		break;
	case I915_TILING_X:
	case I915_TILING_Y:
		obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto out;
	}

	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
		drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
		goto out;
	}


	drm_dbg_kms(&dev_priv->drm, "initial plane fb obj %p\n", obj);
	ret = true;
out:
	/* NOTE(review): presumably fb init takes its own obj reference - drop ours */
	i915_gem_object_put(obj);
	return ret;
}
3467
3468 static void
3469 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3470                         struct intel_plane_state *plane_state,
3471                         bool visible)
3472 {
3473         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
3474
3475         plane_state->uapi.visible = visible;
3476
3477         if (visible)
3478                 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
3479         else
3480                 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
3481 }
3482
3483 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
3484 {
3485         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3486         struct drm_plane *plane;
3487
3488         /*
3489          * Active_planes aliases if multiple "primary" or cursor planes
3490          * have been used on the same (or wrong) pipe. plane_mask uses
3491          * unique ids, hence we can use that to reconstruct active_planes.
3492          */
3493         crtc_state->active_planes = 0;
3494
3495         drm_for_each_plane_mask(plane, &dev_priv->drm,
3496                                 crtc_state->uapi.plane_mask)
3497                 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
3498 }
3499
/*
 * Disable @plane on @crtc outside of a full atomic commit, fixing up the
 * current crtc/plane software state to match. Used when the current
 * (e.g. firmware-programmed) plane setup can't be preserved.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_active_planes(crtc_state);
	/* a disabled plane contributes no bandwidth or cdclk demand */
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	/* NOTE(review): IPS appears tied to the primary plane - confirm */
	if (plane->id == PLANE_PRIMARY)
		hsw_disable_ips(crtc_state);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_disable_plane(plane, crtc_state);
}
3544
3545 static struct intel_frontbuffer *
3546 to_intel_frontbuffer(struct drm_framebuffer *fb)
3547 {
3548         return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
3549 }
3550
/*
 * Take over the firmware-programmed primary plane framebuffer for
 * @intel_crtc: first try to wrap the preallocated memory in a new fb,
 * then fall back to sharing another active crtc's fb that scans out the
 * same GGTT address. If both fail, disable the plane so software state
 * and hardware agree.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	/* NOTE(review): alloc failed, so the fb was never fully initialized
	 * and a plain kfree should suffice - confirm */
	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		/* same GGTT base -> both crtcs scan out the same memory */
		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->hw.fb;
			drm_framebuffer_get(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	intel_state->hw.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->hw.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->hw.rotation);

	/* pin the fb so scanout can continue uninterrupted */
	intel_state->vma =
		intel_pin_and_fence_fb_obj(fb,
					   &intel_state->view,
					   intel_plane_uses_fence(intel_state),
					   &intel_state->flags);
	if (IS_ERR(intel_state->vma)) {
		drm_err(&dev_priv->drm,
			"failed to pin boot fb on pipe %d: %li\n",
			intel_crtc->pipe, PTR_ERR(intel_state->vma));

		intel_state->vma = NULL;
		drm_framebuffer_put(fb);
		return;
	}

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	/* show the whole fb 1:1; src coordinates are 16.16 fixed point */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->uapi.src = drm_plane_state_src(plane_state);
	intel_state->uapi.dst = drm_plane_state_dest(plane_state);

	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	plane_state->crtc = &intel_crtc->base;
	intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &to_intel_frontbuffer(fb)->bits);
}
3657
3658 static int skl_max_plane_width(const struct drm_framebuffer *fb,
3659                                int color_plane,
3660                                unsigned int rotation)
3661 {
3662         int cpp = fb->format->cpp[color_plane];
3663
3664         switch (fb->modifier) {
3665         case DRM_FORMAT_MOD_LINEAR:
3666         case I915_FORMAT_MOD_X_TILED:
3667                 /*
3668                  * Validated limit is 4k, but has 5k should
3669                  * work apart from the following features:
3670                  * - Ytile (already limited to 4k)
3671                  * - FP16 (already limited to 4k)
3672                  * - render compression (already limited to 4k)
3673                  * - KVMR sprite and cursor (don't care)
3674                  * - horizontal panning (TODO verify this)
3675                  * - pipe and plane scaling (TODO verify this)
3676                  */
3677                 if (cpp == 8)
3678                         return 4096;
3679                 else
3680                         return 5120;
3681         case I915_FORMAT_MOD_Y_TILED_CCS:
3682         case I915_FORMAT_MOD_Yf_TILED_CCS:
3683         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
3684                 /* FIXME AUX plane? */
3685         case I915_FORMAT_MOD_Y_TILED:
3686         case I915_FORMAT_MOD_Yf_TILED:
3687                 if (cpp == 8)
3688                         return 2048;
3689                 else
3690                         return 4096;
3691         default:
3692                 MISSING_CASE(fb->modifier);
3693                 return 2048;
3694         }
3695 }
3696
3697 static int glk_max_plane_width(const struct drm_framebuffer *fb,
3698                                int color_plane,
3699                                unsigned int rotation)
3700 {
3701         int cpp = fb->format->cpp[color_plane];
3702
3703         switch (fb->modifier) {
3704         case DRM_FORMAT_MOD_LINEAR:
3705         case I915_FORMAT_MOD_X_TILED:
3706                 if (cpp == 8)
3707                         return 4096;
3708                 else
3709                         return 5120;
3710         case I915_FORMAT_MOD_Y_TILED_CCS:
3711         case I915_FORMAT_MOD_Yf_TILED_CCS:
3712                 /* FIXME AUX plane? */
3713         case I915_FORMAT_MOD_Y_TILED:
3714         case I915_FORMAT_MOD_Yf_TILED:
3715                 if (cpp == 8)
3716                         return 2048;
3717                 else
3718                         return 5120;
3719         default:
3720                 MISSING_CASE(fb->modifier);
3721                 return 2048;
3722         }
3723 }
3724
/*
 * Max source plane width on ICL+: a flat 5120, independent of the
 * fb/color_plane/rotation arguments (kept for signature parity with the
 * skl/glk variants selected by the caller).
 */
static int icl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	return 5120;
}
3731
/* Max source plane height on pre-ICL platforms (see skl_check_main_surface). */
static int skl_max_plane_height(void)
{
	return 4096;
}
3736
/* Max source plane height on ICL+ (gen >= 11). */
static int icl_max_plane_height(void)
{
	return 4320;
}
3741
/*
 * Check whether the CCS AUX surface x/y can be made to agree with the
 * main surface coordinates (@main_x/@main_y at offset @main_offset).
 *
 * Walks the AUX surface offset backwards one alignment step at a time,
 * recomputing the AUX x/y after each step, until the coordinates match
 * the main surface or can no longer converge. On success the adjusted
 * offset/x/y are written back to color_plane[@ccs_plane] and true is
 * returned.
 */
static bool
skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
			       int main_x, int main_y, u32 main_offset,
			       int ccs_plane)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int aux_x = plane_state->color_plane[ccs_plane].x;
	int aux_y = plane_state->color_plane[ccs_plane].y;
	u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
	u32 alignment = intel_surf_alignment(fb, ccs_plane);
	int hsub;
	int vsub;

	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
	/* keep the AUX offset >= main offset while closing in on the x/y */
	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		if (aux_x == main_x && aux_y == main_y)
			break;

		/* can't back up any further */
		if (aux_offset == 0)
			break;

		/* step the AUX offset down by one alignment unit */
		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
							       plane_state,
							       ccs_plane,
							       aux_offset,
							       aux_offset -
								alignment);
		/* preserve the sub-sample remainder across the adjustment */
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[ccs_plane].offset = aux_offset;
	plane_state->color_plane[ccs_plane].x = aux_x;
	plane_state->color_plane[ccs_plane].y = aux_y;

	return true;
}
3786
/*
 * Compute the final offset and x/y coordinates of the main (color plane 0)
 * surface for a SKL+ plane, honouring the per-gen max plane size, the
 * surface alignment, the X-tile stride limit and (for CCS framebuffers)
 * the requirement that the AUX surface offset not precede the main one.
 *
 * On success the result is stored in plane_state->color_plane[0] and the
 * uapi src rect is translated to the final coordinates; returns -EINVAL
 * if no suitable offset can be found.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        unsigned int rotation = plane_state->hw.rotation;
        /* src coordinates are 16.16 fixed point; shift down to pixels */
        int x = plane_state->uapi.src.x1 >> 16;
        int y = plane_state->uapi.src.y1 >> 16;
        int w = drm_rect_width(&plane_state->uapi.src) >> 16;
        int h = drm_rect_height(&plane_state->uapi.src) >> 16;
        int max_width;
        int max_height;
        u32 alignment;
        u32 offset;
        int aux_plane = intel_main_to_aux_plane(fb, 0);
        u32 aux_offset = plane_state->color_plane[aux_plane].offset;

        /* Max plane width grew over the generations. */
        if (INTEL_GEN(dev_priv) >= 11)
                max_width = icl_max_plane_width(fb, 0, rotation);
        else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
                max_width = glk_max_plane_width(fb, 0, rotation);
        else
                max_width = skl_max_plane_width(fb, 0, rotation);

        if (INTEL_GEN(dev_priv) >= 11)
                max_height = icl_max_plane_height();
        else
                max_height = skl_max_plane_height();

        if (w > max_width || h > max_height) {
                drm_dbg_kms(&dev_priv->drm,
                            "requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
                            w, h, max_width, max_height);
                return -EINVAL;
        }

        intel_add_fb_offsets(&x, &y, plane_state, 0);
        offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
        alignment = intel_surf_alignment(fb, 0);
        if (WARN_ON(alignment && !is_power_of_2(alignment)))
                return -EINVAL;

        /*
         * AUX surface offset is specified as the distance from the
         * main surface offset, and it must be non-negative. Make
         * sure that is what we will get.
         */
        if (offset > aux_offset)
                offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
                                                           offset, aux_offset & ~(alignment - 1));

        /*
         * When using an X-tiled surface, the plane blows up
         * if the x offset + width exceed the stride.
         *
         * TODO: linear and Y-tiled seem fine, Yf untested,
         */
        if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
                int cpp = fb->format->cpp[0];

                /* Walk the offset backwards one alignment step at a time
                 * until the x offset fits within the stride. */
                while ((x + w) * cpp > plane_state->color_plane[0].stride) {
                        if (offset == 0) {
                                drm_dbg_kms(&dev_priv->drm,
                                            "Unable to find suitable display surface offset due to X-tiling\n");
                                return -EINVAL;
                        }

                        offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
                                                                   offset, offset - alignment);
                }
        }

        /*
         * CCS AUX surface doesn't have its own x/y offsets, we must make sure
         * they match with the main surface x/y offsets.
         */
        if (is_ccs_modifier(fb->modifier)) {
                while (!skl_check_main_ccs_coordinates(plane_state, x, y,
                                                       offset, aux_plane)) {
                        if (offset == 0)
                                break;

                        offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
                                                                   offset, offset - alignment);
                }

                if (x != plane_state->color_plane[aux_plane].x ||
                    y != plane_state->color_plane[aux_plane].y) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "Unable to find suitable display surface offset due to CCS\n");
                        return -EINVAL;
                }
        }

        plane_state->color_plane[0].offset = offset;
        plane_state->color_plane[0].x = x;
        plane_state->color_plane[0].y = y;

        /*
         * Put the final coordinates back so that the src
         * coordinate checks will see the right values.
         */
        drm_rect_translate_to(&plane_state->uapi.src,
                              x << 16, y << 16);

        return 0;
}
3893
/*
 * Compute the offset and x/y coordinates of the CbCr (UV, color plane 1)
 * surface for a semiplanar YUV framebuffer, keeping it consistent with
 * its CCS plane when a CCS modifier is in use.
 *
 * Returns 0 on success, -EINVAL if the source is too large or no CCS
 * compatible offset exists.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
        struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        unsigned int rotation = plane_state->hw.rotation;
        int uv_plane = 1;
        int max_width = skl_max_plane_width(fb, uv_plane, rotation);
        int max_height = 4096;
        /*
         * >> 17 = >> 16 (16.16 fixed point to pixels) plus an extra
         * halving for the 2x2 chroma subsampling of the UV plane.
         */
        int x = plane_state->uapi.src.x1 >> 17;
        int y = plane_state->uapi.src.y1 >> 17;
        int w = drm_rect_width(&plane_state->uapi.src) >> 17;
        int h = drm_rect_height(&plane_state->uapi.src) >> 17;
        u32 offset;

        intel_add_fb_offsets(&x, &y, plane_state, uv_plane);
        offset = intel_plane_compute_aligned_offset(&x, &y,
                                                    plane_state, uv_plane);

        /* FIXME not quite sure how/if these apply to the chroma plane */
        if (w > max_width || h > max_height) {
                drm_dbg_kms(&i915->drm,
                            "CbCr source size %dx%d too big (limit %dx%d)\n",
                            w, h, max_width, max_height);
                return -EINVAL;
        }

        if (is_ccs_modifier(fb->modifier)) {
                int ccs_plane = main_to_ccs_plane(fb, uv_plane);
                int aux_offset = plane_state->color_plane[ccs_plane].offset;
                int alignment = intel_surf_alignment(fb, uv_plane);

                /* The CCS offset must not precede the UV surface offset. */
                if (offset > aux_offset)
                        offset = intel_plane_adjust_aligned_offset(&x, &y,
                                                                   plane_state,
                                                                   uv_plane,
                                                                   offset,
                                                                   aux_offset & ~(alignment - 1));

                /* Step the offset back until the UV and CCS x/y agree. */
                while (!skl_check_main_ccs_coordinates(plane_state, x, y,
                                                       offset, ccs_plane)) {
                        if (offset == 0)
                                break;

                        offset = intel_plane_adjust_aligned_offset(&x, &y,
                                                                   plane_state,
                                                                   uv_plane,
                                                                   offset, offset - alignment);
                }

                if (x != plane_state->color_plane[ccs_plane].x ||
                    y != plane_state->color_plane[ccs_plane].y) {
                        drm_dbg_kms(&i915->drm,
                                    "Unable to find suitable display surface offset due to CCS\n");
                        return -EINVAL;
                }
        }

        plane_state->color_plane[uv_plane].offset = offset;
        plane_state->color_plane[uv_plane].x = x;
        plane_state->color_plane[uv_plane].y = y;

        return 0;
}
3957
/*
 * Compute the offset and x/y coordinates of every CCS (color control
 * surface) plane of the framebuffer, derived from the corresponding
 * main plane's source coordinates and the combined subsampling factors.
 *
 * Always returns 0.
 */
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        int src_x = plane_state->uapi.src.x1 >> 16;
        int src_y = plane_state->uapi.src.y1 >> 16;
        u32 offset;
        int ccs_plane;

        for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) {
                int main_hsub, main_vsub;
                int hsub, vsub;
                int x, y;

                /* Only the CCS planes are handled here. */
                if (!is_ccs_plane(fb, ccs_plane))
                        continue;

                intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
                                               ccs_to_main_plane(fb, ccs_plane));
                intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

                /*
                 * The total subsampling of the CCS plane relative to plane 0
                 * is the main plane's subsampling times the CCS plane's own.
                 */
                hsub *= main_hsub;
                vsub *= main_vsub;
                x = src_x / hsub;
                y = src_y / vsub;

                intel_add_fb_offsets(&x, &y, plane_state, ccs_plane);

                offset = intel_plane_compute_aligned_offset(&x, &y,
                                                            plane_state,
                                                            ccs_plane);

                plane_state->color_plane[ccs_plane].offset = offset;
                /*
                 * Store the coordinates in main-plane units, re-adding the
                 * subsampling remainder lost by the division above.
                 */
                plane_state->color_plane[ccs_plane].x = (x * hsub +
                                                         src_x % hsub) /
                                                        main_hsub;
                plane_state->color_plane[ccs_plane].y = (y * vsub +
                                                         src_y % vsub) /
                                                        main_vsub;
        }

        return 0;
}
4000
4001 int skl_check_plane_surface(struct intel_plane_state *plane_state)
4002 {
4003         const struct drm_framebuffer *fb = plane_state->hw.fb;
4004         int ret;
4005         bool needs_aux = false;
4006
4007         ret = intel_plane_compute_gtt(plane_state);
4008         if (ret)
4009                 return ret;
4010
4011         if (!plane_state->uapi.visible)
4012                 return 0;
4013
4014         /*
4015          * Handle the AUX surface first since the main surface setup depends on
4016          * it.
4017          */
4018         if (is_ccs_modifier(fb->modifier)) {
4019                 needs_aux = true;
4020                 ret = skl_check_ccs_aux_surface(plane_state);
4021                 if (ret)
4022                         return ret;
4023         }
4024
4025         if (intel_format_info_is_yuv_semiplanar(fb->format,
4026                                                 fb->modifier)) {
4027                 needs_aux = true;
4028                 ret = skl_check_nv12_aux_surface(plane_state);
4029                 if (ret)
4030                         return ret;
4031         }
4032
4033         if (!needs_aux) {
4034                 int i;
4035
4036                 for (i = 1; i < fb->format->num_planes; i++) {
4037                         plane_state->color_plane[i].offset = ~0xfff;
4038                         plane_state->color_plane[i].x = 0;
4039                         plane_state->color_plane[i].y = 0;
4040                 }
4041         }
4042
4043         ret = skl_check_main_surface(plane_state);
4044         if (ret)
4045                 return ret;
4046
4047         return 0;
4048 }
4049
4050 static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
4051                              const struct intel_plane_state *plane_state,
4052                              unsigned int *num, unsigned int *den)
4053 {
4054         const struct drm_framebuffer *fb = plane_state->hw.fb;
4055         unsigned int cpp = fb->format->cpp[0];
4056
4057         /*
4058          * g4x bspec says 64bpp pixel rate can't exceed 80%
4059          * of cdclk when the sprite plane is enabled on the
4060          * same pipe. ilk/snb bspec says 64bpp pixel rate is
4061          * never allowed to exceed 80% of cdclk. Let's just go
4062          * with the ilk/snb limit always.
4063          */
4064         if (cpp == 8) {
4065                 *num = 10;
4066                 *den = 8;
4067         } else {
4068                 *num = 1;
4069                 *den = 1;
4070         }
4071 }
4072
4073 static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
4074                                 const struct intel_plane_state *plane_state)
4075 {
4076         unsigned int pixel_rate;
4077         unsigned int num, den;
4078
4079         /*
4080          * Note that crtc_state->pixel_rate accounts for both
4081          * horizontal and vertical panel fitter downscaling factors.
4082          * Pre-HSW bspec tells us to only consider the horizontal
4083          * downscaling factor here. We ignore that and just consider
4084          * both for simplicity.
4085          */
4086         pixel_rate = crtc_state->pixel_rate;
4087
4088         i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
4089
4090         /* two pixels per clock with double wide pipe */
4091         if (crtc_state->double_wide)
4092                 den *= 2;
4093
4094         return DIV_ROUND_UP(pixel_rate * num, den);
4095 }
4096
4097 unsigned int
4098 i9xx_plane_max_stride(struct intel_plane *plane,
4099                       u32 pixel_format, u64 modifier,
4100                       unsigned int rotation)
4101 {
4102         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4103
4104         if (!HAS_GMCH(dev_priv)) {
4105                 return 32*1024;
4106         } else if (INTEL_GEN(dev_priv) >= 4) {
4107                 if (modifier == I915_FORMAT_MOD_X_TILED)
4108                         return 16*1024;
4109                 else
4110                         return 32*1024;
4111         } else if (INTEL_GEN(dev_priv) >= 3) {
4112                 if (modifier == I915_FORMAT_MOD_X_TILED)
4113                         return 8*1024;
4114                 else
4115                         return 16*1024;
4116         } else {
4117                 if (plane->i9xx_plane == PLANE_C)
4118                         return 4*1024;
4119                 else
4120                         return 8*1024;
4121         }
4122 }
4123
4124 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4125 {
4126         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4127         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4128         u32 dspcntr = 0;
4129
4130         if (crtc_state->gamma_enable)
4131                 dspcntr |= DISPPLANE_GAMMA_ENABLE;
4132
4133         if (crtc_state->csc_enable)
4134                 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
4135
4136         if (INTEL_GEN(dev_priv) < 5)
4137                 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
4138
4139         return dspcntr;
4140 }
4141
/*
 * Compute the plane-state-dependent bits of the DSPCNTR register:
 * enable bit, pixel format, tiling and rotation/reflection.
 * Returns 0 for an unsupported pixel format (after WARNing via
 * MISSING_CASE).
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
                          const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->uapi.plane->dev);
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        unsigned int rotation = plane_state->hw.rotation;
        u32 dspcntr;

        dspcntr = DISPLAY_PLANE_ENABLE;

        /* Trickle feed workaround on the platforms that need it. */
        if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
            IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
                dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

        /* Translate the DRM fourcc into the hardware format field. */
        switch (fb->format->format) {
        case DRM_FORMAT_C8:
                dspcntr |= DISPPLANE_8BPP;
                break;
        case DRM_FORMAT_XRGB1555:
                dspcntr |= DISPPLANE_BGRX555;
                break;
        case DRM_FORMAT_ARGB1555:
                dspcntr |= DISPPLANE_BGRA555;
                break;
        case DRM_FORMAT_RGB565:
                dspcntr |= DISPPLANE_BGRX565;
                break;
        case DRM_FORMAT_XRGB8888:
                dspcntr |= DISPPLANE_BGRX888;
                break;
        case DRM_FORMAT_XBGR8888:
                dspcntr |= DISPPLANE_RGBX888;
                break;
        case DRM_FORMAT_ARGB8888:
                dspcntr |= DISPPLANE_BGRA888;
                break;
        case DRM_FORMAT_ABGR8888:
                dspcntr |= DISPPLANE_RGBA888;
                break;
        case DRM_FORMAT_XRGB2101010:
                dspcntr |= DISPPLANE_BGRX101010;
                break;
        case DRM_FORMAT_XBGR2101010:
                dspcntr |= DISPPLANE_RGBX101010;
                break;
        case DRM_FORMAT_ARGB2101010:
                dspcntr |= DISPPLANE_BGRA101010;
                break;
        case DRM_FORMAT_ABGR2101010:
                dspcntr |= DISPPLANE_RGBA101010;
                break;
        case DRM_FORMAT_XBGR16161616F:
                dspcntr |= DISPPLANE_RGBX161616;
                break;
        default:
                MISSING_CASE(fb->format->format);
                return 0;
        }

        if (INTEL_GEN(dev_priv) >= 4 &&
            fb->modifier == I915_FORMAT_MOD_X_TILED)
                dspcntr |= DISPPLANE_TILED;

        if (rotation & DRM_MODE_ROTATE_180)
                dspcntr |= DISPPLANE_ROTATE_180;

        if (rotation & DRM_MODE_REFLECT_X)
                dspcntr |= DISPPLANE_MIRROR;

        return dspcntr;
}
4214
/*
 * Compute the surface offset and x/y coordinates for a pre-SKL
 * primary plane, including the rotation/reflection coordinate
 * adjustment that HSW/BDW perform in hardware.
 *
 * Returns 0 on success or a negative error code.
 */
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->uapi.plane->dev);
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        int src_x, src_y, src_w;
        u32 offset;
        int ret;

        ret = intel_plane_compute_gtt(plane_state);
        if (ret)
                return ret;

        if (!plane_state->uapi.visible)
                return 0;

        /* src coordinates are 16.16 fixed point */
        src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
        src_x = plane_state->uapi.src.x1 >> 16;
        src_y = plane_state->uapi.src.y1 >> 16;

        /* Undocumented hardware limit on i965/g4x/vlv/chv */
        if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
                return -EINVAL;

        intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);

        /* gen2/3 have no DSPSURF offset register; they use DSPADDR directly. */
        if (INTEL_GEN(dev_priv) >= 4)
                offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
                                                            plane_state, 0);
        else
                offset = 0;

        /*
         * Put the final coordinates back so that the src
         * coordinate checks will see the right values.
         */
        drm_rect_translate_to(&plane_state->uapi.src,
                              src_x << 16, src_y << 16);

        /* HSW/BDW do this automagically in hardware */
        if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
                unsigned int rotation = plane_state->hw.rotation;
                /* NOTE(review): this src_w shadows the outer one; the width
                 * is unchanged by the translate above, so both hold the same
                 * value — the shadowing is redundant but harmless. */
                int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
                int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

                /* Point at the last pixel scanned out for 180°/mirror. */
                if (rotation & DRM_MODE_ROTATE_180) {
                        src_x += src_w - 1;
                        src_y += src_h - 1;
                } else if (rotation & DRM_MODE_REFLECT_X) {
                        src_x += src_w - 1;
                }
        }

        plane_state->color_plane[0].offset = offset;
        plane_state->color_plane[0].x = src_x;
        plane_state->color_plane[0].y = src_y;

        return 0;
}
4274
4275 static bool i9xx_plane_has_windowing(struct intel_plane *plane)
4276 {
4277         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4278         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4279
4280         if (IS_CHERRYVIEW(dev_priv))
4281                 return i9xx_plane == PLANE_B;
4282         else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
4283                 return false;
4284         else if (IS_GEN(dev_priv, 4))
4285                 return i9xx_plane == PLANE_C;
4286         else
4287                 return i9xx_plane == PLANE_B ||
4288                         i9xx_plane == PLANE_C;
4289 }
4290
/*
 * Atomic check hook for pre-SKL primary planes: validates rotation,
 * clips the plane against the crtc, computes the surface layout and
 * precomputes the DSPCNTR value for the commit phase.
 *
 * Returns 0 on success or a negative error code.
 */
static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
                 struct intel_plane_state *plane_state)
{
        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
        int ret;

        ret = chv_plane_check_rotation(plane_state);
        if (ret)
                return ret;

        /* No scaling on these planes; windowing only where supported. */
        ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
                                                  &crtc_state->uapi,
                                                  DRM_PLANE_HELPER_NO_SCALING,
                                                  DRM_PLANE_HELPER_NO_SCALING,
                                                  i9xx_plane_has_windowing(plane),
                                                  true);
        if (ret)
                return ret;

        ret = i9xx_check_plane_surface(plane_state);
        if (ret)
                return ret;

        if (!plane_state->uapi.visible)
                return 0;

        ret = intel_plane_check_src_coordinates(plane_state);
        if (ret)
                return ret;

        plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);

        return 0;
}
4326
/*
 * Program a pre-SKL primary plane's registers from the precomputed
 * plane state. All register writes are done under the uncore lock
 * with _fw accessors; the surface register write arms the update.
 */
static void i9xx_update_plane(struct intel_plane *plane,
                              const struct intel_crtc_state *crtc_state,
                              const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        u32 linear_offset;
        int x = plane_state->color_plane[0].x;
        int y = plane_state->color_plane[0].y;
        int crtc_x = plane_state->uapi.dst.x1;
        int crtc_y = plane_state->uapi.dst.y1;
        int crtc_w = drm_rect_width(&plane_state->uapi.dst);
        int crtc_h = drm_rect_height(&plane_state->uapi.dst);
        unsigned long irqflags;
        u32 dspaddr_offset;
        u32 dspcntr;

        /* Combine the precomputed plane bits with the crtc bits. */
        dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);

        linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

        /* gen2/3 have no aligned-offset support; use the linear offset. */
        if (INTEL_GEN(dev_priv) >= 4)
                dspaddr_offset = plane_state->color_plane[0].offset;
        else
                dspaddr_offset = linear_offset;

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
                          plane_state->color_plane[0].stride);

        if (INTEL_GEN(dev_priv) < 4) {
                /*
                 * PLANE_A doesn't actually have a full window
                 * generator but let's assume we still need to
                 * program whatever is there.
                 */
                intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
                                  (crtc_y << 16) | crtc_x);
                intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
                                  ((crtc_h - 1) << 16) | (crtc_w - 1));
        } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
                /* CHV PLANE_B uses the PRIM* windowing registers. */
                intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
                                  (crtc_y << 16) | crtc_x);
                intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
                                  ((crtc_h - 1) << 16) | (crtc_w - 1));
                intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
        }

        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
                                  (y << 16) | x);
        } else if (INTEL_GEN(dev_priv) >= 4) {
                intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
                                  linear_offset);
                intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
                                  (y << 16) | x);
        }

        /*
         * The control register self-arms if the plane was previously
         * disabled. Try to make the plane enable atomic by writing
         * the control register just before the surface register.
         */
        intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
        if (INTEL_GEN(dev_priv) >= 4)
                intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
                                  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
        else
                intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
                                  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
4401
/*
 * Disable a pre-SKL primary plane while keeping the crtc-dependent
 * DSPCNTR bits programmed (they affect the pipe bottom color).
 */
static void i9xx_disable_plane(struct intel_plane *plane,
                               const struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        unsigned long irqflags;
        u32 dspcntr;

        /*
         * DSPCNTR pipe gamma enable on g4x+ and pipe csc
         * enable on ilk+ affect the pipe bottom color as
         * well, so we must configure them even if the plane
         * is disabled.
         *
         * On pre-g4x there is no way to gamma correct the
         * pipe bottom color but we'll keep on doing this
         * anyway so that the crtc state readout works correctly.
         */
        dspcntr = i9xx_plane_ctl_crtc(crtc_state);

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* Writing the surface/address register arms the disable. */
        intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
        if (INTEL_GEN(dev_priv) >= 4)
                intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
        else
                intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
4432
/*
 * Read back whether a pre-SKL primary plane is enabled in hardware
 * and, via *pipe, which pipe it is attached to.
 *
 * Returns false if the plane's power domain is off (plane treated
 * as disabled); *pipe is only valid when the function returns with
 * the power domain enabled.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
                                    enum pipe *pipe)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        intel_wakeref_t wakeref;
        bool ret;
        u32 val;

        /*
         * Not 100% correct for planes that can move between pipes,
         * but that's only the case for gen2-4 which don't have any
         * display power wells.
         */
        power_domain = POWER_DOMAIN_PIPE(plane->pipe);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wakeref)
                return false;

        val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

        ret = val & DISPLAY_PLANE_ENABLE;

        /* On gen2-4 the pipe assignment is read out of DSPCNTR itself. */
        if (INTEL_GEN(dev_priv) >= 5)
                *pipe = plane->pipe;
        else
                *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
                        DISPPLANE_SEL_PIPE_SHIFT;

        intel_display_power_put(dev_priv, power_domain, wakeref);

        return ret;
}
4467
4468 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
4469 {
4470         struct drm_device *dev = intel_crtc->base.dev;
4471         struct drm_i915_private *dev_priv = to_i915(dev);
4472
4473         intel_de_write(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
4474         intel_de_write(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
4475         intel_de_write(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
4476 }
4477
4478 /*
4479  * This function detaches (aka. unbinds) unused scalers in hardware
4480  */
4481 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
4482 {
4483         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
4484         const struct intel_crtc_scaler_state *scaler_state =
4485                 &crtc_state->scaler_state;
4486         int i;
4487
4488         /* loop through and disable scalers that aren't in use */
4489         for (i = 0; i < intel_crtc->num_scalers; i++) {
4490                 if (!scaler_state->scalers[i].in_use)
4491                         skl_detach_scaler(intel_crtc, i);
4492         }
4493 }
4494
/*
 * Return the divisor converting a byte stride into the units the
 * hardware stride register expects: 64 byte chunks for linear
 * buffers, tiles (height when rotated 90/270, width in bytes
 * otherwise) for tiled buffers.
 */
static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
                                          int color_plane, unsigned int rotation)
{
        if (is_surface_linear(fb, color_plane))
                return 64;

        if (drm_rotation_90_or_270(rotation))
                return intel_tile_height(fb, color_plane);

        return intel_tile_width_bytes(fb, color_plane);
}
4509
4510 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
4511                      int color_plane)
4512 {
4513         const struct drm_framebuffer *fb = plane_state->hw.fb;
4514         unsigned int rotation = plane_state->hw.rotation;
4515         u32 stride = plane_state->color_plane[color_plane].stride;
4516
4517         if (color_plane >= fb->format->num_planes)
4518                 return 0;
4519
4520         return stride / skl_plane_stride_mult(fb, color_plane, rotation);
4521 }
4522
/*
 * Translate a DRM fourcc into the SKL+ PLANE_CTL format (and, where
 * needed, channel-order) bits. Returns 0 for an unsupported format
 * (after WARNing via MISSING_CASE).
 */
static u32 skl_plane_ctl_format(u32 pixel_format)
{
        switch (pixel_format) {
        case DRM_FORMAT_C8:
                return PLANE_CTL_FORMAT_INDEXED;
        case DRM_FORMAT_RGB565:
                return PLANE_CTL_FORMAT_RGB_565;
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_ABGR8888:
                return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_ARGB8888:
                return PLANE_CTL_FORMAT_XRGB_8888;
        case DRM_FORMAT_XBGR2101010:
        case DRM_FORMAT_ABGR2101010:
                return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
        case DRM_FORMAT_XRGB2101010:
        case DRM_FORMAT_ARGB2101010:
                return PLANE_CTL_FORMAT_XRGB_2101010;
        case DRM_FORMAT_XBGR16161616F:
        case DRM_FORMAT_ABGR16161616F:
                return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
        case DRM_FORMAT_XRGB16161616F:
        case DRM_FORMAT_ARGB16161616F:
                return PLANE_CTL_FORMAT_XRGB_16161616F;
        case DRM_FORMAT_YUYV:
                return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
        case DRM_FORMAT_YVYU:
                return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
        case DRM_FORMAT_UYVY:
                return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
        case DRM_FORMAT_VYUY:
                return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
        case DRM_FORMAT_NV12:
                return PLANE_CTL_FORMAT_NV12;
        case DRM_FORMAT_P010:
                return PLANE_CTL_FORMAT_P010;
        case DRM_FORMAT_P012:
                return PLANE_CTL_FORMAT_P012;
        case DRM_FORMAT_P016:
                return PLANE_CTL_FORMAT_P016;
        case DRM_FORMAT_Y210:
                return PLANE_CTL_FORMAT_Y210;
        case DRM_FORMAT_Y212:
                return PLANE_CTL_FORMAT_Y212;
        case DRM_FORMAT_Y216:
                return PLANE_CTL_FORMAT_Y216;
        case DRM_FORMAT_XVYU2101010:
                return PLANE_CTL_FORMAT_Y410;
        case DRM_FORMAT_XVYU12_16161616:
                return PLANE_CTL_FORMAT_Y412;
        case DRM_FORMAT_XVYU16161616:
                return PLANE_CTL_FORMAT_Y416;
        default:
                MISSING_CASE(pixel_format);
        }

        return 0;
}
4582
4583 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
4584 {
4585         if (!plane_state->hw.fb->format->has_alpha)
4586                 return PLANE_CTL_ALPHA_DISABLE;
4587
4588         switch (plane_state->hw.pixel_blend_mode) {
4589         case DRM_MODE_BLEND_PIXEL_NONE:
4590                 return PLANE_CTL_ALPHA_DISABLE;
4591         case DRM_MODE_BLEND_PREMULTI:
4592                 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4593         case DRM_MODE_BLEND_COVERAGE:
4594                 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4595         default:
4596                 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4597                 return PLANE_CTL_ALPHA_DISABLE;
4598         }
4599 }
4600
4601 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4602 {
4603         if (!plane_state->hw.fb->format->has_alpha)
4604                 return PLANE_COLOR_ALPHA_DISABLE;
4605
4606         switch (plane_state->hw.pixel_blend_mode) {
4607         case DRM_MODE_BLEND_PIXEL_NONE:
4608                 return PLANE_COLOR_ALPHA_DISABLE;
4609         case DRM_MODE_BLEND_PREMULTI:
4610                 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4611         case DRM_MODE_BLEND_COVERAGE:
4612                 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4613         default:
4614                 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4615                 return PLANE_COLOR_ALPHA_DISABLE;
4616         }
4617 }
4618
4619 static u32 skl_plane_ctl_tiling(u64 fb_modifier)
4620 {
4621         switch (fb_modifier) {
4622         case DRM_FORMAT_MOD_LINEAR:
4623                 break;
4624         case I915_FORMAT_MOD_X_TILED:
4625                 return PLANE_CTL_TILED_X;
4626         case I915_FORMAT_MOD_Y_TILED:
4627                 return PLANE_CTL_TILED_Y;
4628         case I915_FORMAT_MOD_Y_TILED_CCS:
4629                 return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4630         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
4631                 return PLANE_CTL_TILED_Y |
4632                        PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
4633                        PLANE_CTL_CLEAR_COLOR_DISABLE;
4634         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
4635                 return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
4636         case I915_FORMAT_MOD_Yf_TILED:
4637                 return PLANE_CTL_TILED_YF;
4638         case I915_FORMAT_MOD_Yf_TILED_CCS:
4639                 return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4640         default:
4641                 MISSING_CASE(fb_modifier);
4642         }
4643
4644         return 0;
4645 }
4646
4647 static u32 skl_plane_ctl_rotate(unsigned int rotate)
4648 {
4649         switch (rotate) {
4650         case DRM_MODE_ROTATE_0:
4651                 break;
4652         /*
4653          * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
4654          * while i915 HW rotation is clockwise, thats why this swapping.
4655          */
4656         case DRM_MODE_ROTATE_90:
4657                 return PLANE_CTL_ROTATE_270;
4658         case DRM_MODE_ROTATE_180:
4659                 return PLANE_CTL_ROTATE_180;
4660         case DRM_MODE_ROTATE_270:
4661                 return PLANE_CTL_ROTATE_90;
4662         default:
4663                 MISSING_CASE(rotate);
4664         }
4665
4666         return 0;
4667 }
4668
4669 static u32 cnl_plane_ctl_flip(unsigned int reflect)
4670 {
4671         switch (reflect) {
4672         case 0:
4673                 break;
4674         case DRM_MODE_REFLECT_X:
4675                 return PLANE_CTL_FLIP_HORIZONTAL;
4676         case DRM_MODE_REFLECT_Y:
4677         default:
4678                 MISSING_CASE(reflect);
4679         }
4680
4681         return 0;
4682 }
4683
4684 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4685 {
4686         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4687         u32 plane_ctl = 0;
4688
4689         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4690                 return plane_ctl;
4691
4692         if (crtc_state->gamma_enable)
4693                 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4694
4695         if (crtc_state->csc_enable)
4696                 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
4697
4698         return plane_ctl;
4699 }
4700
4701 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
4702                   const struct intel_plane_state *plane_state)
4703 {
4704         struct drm_i915_private *dev_priv =
4705                 to_i915(plane_state->uapi.plane->dev);
4706         const struct drm_framebuffer *fb = plane_state->hw.fb;
4707         unsigned int rotation = plane_state->hw.rotation;
4708         const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
4709         u32 plane_ctl;
4710
4711         plane_ctl = PLANE_CTL_ENABLE;
4712
4713         if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
4714                 plane_ctl |= skl_plane_ctl_alpha(plane_state);
4715                 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
4716
4717                 if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
4718                         plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
4719
4720                 if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
4721                         plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
4722         }
4723
4724         plane_ctl |= skl_plane_ctl_format(fb->format->format);
4725         plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
4726         plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
4727
4728         if (INTEL_GEN(dev_priv) >= 10)
4729                 plane_ctl |= cnl_plane_ctl_flip(rotation &
4730                                                 DRM_MODE_REFLECT_MASK);
4731
4732         if (key->flags & I915_SET_COLORKEY_DESTINATION)
4733                 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
4734         else if (key->flags & I915_SET_COLORKEY_SOURCE)
4735                 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
4736
4737         return plane_ctl;
4738 }
4739
4740 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4741 {
4742         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4743         u32 plane_color_ctl = 0;
4744
4745         if (INTEL_GEN(dev_priv) >= 11)
4746                 return plane_color_ctl;
4747
4748         if (crtc_state->gamma_enable)
4749                 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4750
4751         if (crtc_state->csc_enable)
4752                 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4753
4754         return plane_color_ctl;
4755 }
4756
4757 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
4758                         const struct intel_plane_state *plane_state)
4759 {
4760         struct drm_i915_private *dev_priv =
4761                 to_i915(plane_state->uapi.plane->dev);
4762         const struct drm_framebuffer *fb = plane_state->hw.fb;
4763         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
4764         u32 plane_color_ctl = 0;
4765
4766         plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
4767         plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
4768
4769         if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
4770                 if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
4771                         plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
4772                 else
4773                         plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
4774
4775                 if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
4776                         plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
4777         } else if (fb->format->is_yuv) {
4778                 plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
4779         }
4780
4781         return plane_color_ctl;
4782 }
4783
/*
 * Restore a previously duplicated atomic @state after a GPU reset (or
 * just re-read the hw state if @state is NULL). Returns 0 on success
 * or a negative error code from the atomic commit.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	/* Re-read and sanitize the current hardware state first. */
	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	/* No saved state to restore. */
	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	/* The caller already holds all modeset locks via @ctx, so a
	 * -EDEADLK here would indicate a locking bug. */
	WARN_ON(ret == -EDEADLK);
	return ret;
}
4822
4823 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4824 {
4825         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4826                 intel_has_gpu_reset(&dev_priv->gt));
4827 }
4828
/*
 * Prepare the display for a GPU reset: when the reset will clobber the
 * display (or force_reset_modeset_test is set), take all modeset locks,
 * duplicate the current atomic state into
 * dev_priv->modeset_restore_state and disable all CRTCs.
 * intel_finish_reset() restores the saved state and drops the locks.
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* Retry until we own every modeset lock. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		/* Locks stay held; intel_finish_reset() drops them. */
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		return;
	}

	/* Saved for restoration by intel_finish_reset(). */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
4888
/*
 * Counterpart to intel_prepare_reset(): restore the display state saved
 * before the GPU reset — re-initializing the display hardware fully if
 * the reset clobbered it — then drop the modeset locks taken in
 * intel_prepare_reset() and clear the I915_RESET_MODESET flag.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	/* Take ownership of the state saved by intel_prepare_reset(). */
	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);

		/* Re-arm hotplug interrupts before restoring the state. */
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
4941
4942 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
4943 {
4944         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4945         enum pipe pipe = crtc->pipe;
4946         u32 tmp;
4947
4948         tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
4949
4950         /*
4951          * Display WA #1153: icl
4952          * enable hardware to bypass the alpha math
4953          * and rounding for per-pixel values 00 and 0xff
4954          */
4955         tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
4956         /*
4957          * Display WA # 1605353570: icl
4958          * Set the pixel rounding bit to 1 for allowing
4959          * passthrough of Frame buffer pixels unmodified
4960          * across pipe
4961          */
4962         tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
4963         intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
4964 }
4965
4966 static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
4967 {
4968         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4969         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4970         u32 trans_ddi_func_ctl2_val;
4971         u8 master_select;
4972
4973         /*
4974          * Configure the master select and enable Transcoder Port Sync for
4975          * Slave CRTCs transcoder.
4976          */
4977         if (crtc_state->master_transcoder == INVALID_TRANSCODER)
4978                 return;
4979
4980         if (crtc_state->master_transcoder == TRANSCODER_EDP)
4981                 master_select = 0;
4982         else
4983                 master_select = crtc_state->master_transcoder + 1;
4984
4985         /* Set the master select bits for Tranascoder Port Sync */
4986         trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) &
4987                                    PORT_SYNC_MODE_MASTER_SELECT_MASK) <<
4988                 PORT_SYNC_MODE_MASTER_SELECT_SHIFT;
4989         /* Enable Transcoder Port Sync */
4990         trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;
4991
4992         intel_de_write(dev_priv,
4993                        TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
4994                        trans_ddi_func_ctl2_val);
4995 }
4996
/*
 * Switch the FDI TX and RX from the link training patterns to normal
 * (pixel data) mode after training has completed, then wait one idle
 * pattern time. IVB additionally gets FDI error correction enabled.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(dev_priv, reg, temp);

	/* CPT PCHs use a different RX train-pattern field than ILK. */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_write(dev_priv, reg,
			       intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}
5037
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Train the CPU FDI TX to PCH FDI RX link on ILK: unmask the training
 * result bits in FDI_RX_IMR, enable TX/RX with training pattern 1, poll
 * FDI_RX_IIR for bit lock, then switch both sides to pattern 2 and poll
 * for symbol lock. Failures are logged but not fatal here.
 */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll (up to 5 reads) for bit lock = pattern 1 success. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Poll (up to 5 reads) for symbol lock = pattern 2 success. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");

}
5133
/*
 * FDI TX voltage swing / pre-emphasis levels tried in turn by the
 * SNB/IVB link training code below until lock is achieved.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
5140
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Train the CPU FDI TX to PCH FDI RX link on SNB/CPT: enable TX/RX with
 * training pattern 1, stepping through the four voltage-swing/
 * pre-emphasis levels in snb_b_fdi_train_param[] until FDI_RX_IIR
 * reports bit lock, then repeat the sweep with pattern 2 until symbol
 * lock. Failures are logged but not fatal here.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	/* CPT PCHs use a different RX train-pattern field than ILK. */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Sweep vswing/pre-emphasis levels, polling for bit lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Sweep vswing/pre-emphasis levels, polling for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
5277
5278 /* Manual link training for Ivy Bridge A0 parts */
5279 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
5280                                       const struct intel_crtc_state *crtc_state)
5281 {
5282         struct drm_device *dev = crtc->base.dev;
5283         struct drm_i915_private *dev_priv = to_i915(dev);
5284         enum pipe pipe = crtc->pipe;
5285         i915_reg_t reg;
5286         u32 temp, i, j;
5287
5288         /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
5289            for train result */
5290         reg = FDI_RX_IMR(pipe);
5291         temp = intel_de_read(dev_priv, reg);
5292         temp &= ~FDI_RX_SYMBOL_LOCK;
5293         temp &= ~FDI_RX_BIT_LOCK;
5294         intel_de_write(dev_priv, reg, temp);
5295
5296         intel_de_posting_read(dev_priv, reg);
5297         udelay(150);
5298
5299         drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
5300                     intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
5301
5302         /* Try each vswing and preemphasis setting twice before moving on */
5303         for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
5304                 /* disable first in case we need to retry */
5305                 reg = FDI_TX_CTL(pipe);
5306                 temp = intel_de_read(dev_priv, reg);
5307                 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
5308                 temp &= ~FDI_TX_ENABLE;
5309                 intel_de_write(dev_priv, reg, temp);
5310
5311                 reg = FDI_RX_CTL(pipe);
5312                 temp = intel_de_read(dev_priv, reg);
5313                 temp &= ~FDI_LINK_TRAIN_AUTO;
5314                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
5315                 temp &= ~FDI_RX_ENABLE;
5316                 intel_de_write(dev_priv, reg, temp);
5317
5318                 /* enable CPU FDI TX and PCH FDI RX */
5319                 reg = FDI_TX_CTL(pipe);
5320                 temp = intel_de_read(dev_priv, reg);
5321                 temp &= ~FDI_DP_PORT_WIDTH_MASK;
5322                 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
5323                 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
5324                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
5325                 temp |= snb_b_fdi_train_param[j/2];
5326                 temp |= FDI_COMPOSITE_SYNC;
5327                 intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
5328
5329                 intel_de_write(dev_priv, FDI_RX_MISC(pipe),
5330                                FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
5331
5332                 reg = FDI_RX_CTL(pipe);
5333                 temp = intel_de_read(dev_priv, reg);
5334                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
5335                 temp |= FDI_COMPOSITE_SYNC;
5336                 intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
5337
5338                 intel_de_posting_read(dev_priv, reg);
5339                 udelay(1); /* should be 0.5us */
5340
5341                 for (i = 0; i < 4; i++) {
5342                         reg = FDI_RX_IIR(pipe);
5343                         temp = intel_de_read(dev_priv, reg);
5344                         drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
5345
5346                         if (temp & FDI_RX_BIT_LOCK ||
5347                             (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
5348                                 intel_de_write(dev_priv, reg,
5349                                                temp | FDI_RX_BIT_LOCK);
5350                                 drm_dbg_kms(&dev_priv->drm,
5351                                             "FDI train 1 done, level %i.\n",
5352                                             i);
5353                                 break;
5354                         }
5355                         udelay(1); /* should be 0.5us */
5356                 }
5357                 if (i == 4) {
5358                         drm_dbg_kms(&dev_priv->drm,
5359                                     "FDI train 1 fail on vswing %d\n", j / 2);
5360                         continue;
5361                 }
5362
5363                 /* Train 2 */
5364                 reg = FDI_TX_CTL(pipe);
5365                 temp = intel_de_read(dev_priv, reg);
5366                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
5367                 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
5368                 intel_de_write(dev_priv, reg, temp);
5369
5370                 reg = FDI_RX_CTL(pipe);
5371                 temp = intel_de_read(dev_priv, reg);
5372                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
5373                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
5374                 intel_de_write(dev_priv, reg, temp);
5375
5376                 intel_de_posting_read(dev_priv, reg);
5377                 udelay(2); /* should be 1.5us */
5378
5379                 for (i = 0; i < 4; i++) {
5380                         reg = FDI_RX_IIR(pipe);
5381                         temp = intel_de_read(dev_priv, reg);
5382                         drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
5383
5384                         if (temp & FDI_RX_SYMBOL_LOCK ||
5385                             (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
5386                                 intel_de_write(dev_priv, reg,
5387                                                temp | FDI_RX_SYMBOL_LOCK);
5388                                 drm_dbg_kms(&dev_priv->drm,
5389                                             "FDI train 2 done, level %i.\n",
5390                                             i);
5391                                 goto train_done;
5392                         }
5393                         udelay(2); /* should be 1.5us */
5394                 }
5395                 if (i == 4)
5396                         drm_dbg_kms(&dev_priv->drm,
5397                                     "FDI train 2 fail on vswing %d\n", j / 2);
5398         }
5399
5400 train_done:
5401         drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
5402 }
5403
/*
 * Enable the FDI PLLs for the pipe described by @crtc_state: first the PCH
 * FDI RX PLL, then switch the RX side from the raw clock to PCDclk, and
 * finally make sure the CPU FDI TX PLL is running.  Each step is followed
 * by a posting read plus a delay for PLL warmup.
 */
static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	/* Clear lane count and BPC (bits 18:16) before reprogramming them */
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* Mirror PIPECONF's BPC field (bits 7:5) into FDI RX bits 18:16 */
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(100);
	}
}
5440
/*
 * Disable the FDI PLLs for @intel_crtc's pipe, reversing
 * ilk_fdi_pll_enable(): switch the RX side back to the raw clock, then
 * turn off the TX PLL and finally the RX PLL, with posting reads and
 * delays so the clocks have settled before we return.
 */
static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}
5470
/*
 * Disable the FDI link for @crtc's pipe: turn off the CPU FDI transmitter
 * and the PCH FDI receiver, then leave both sides parked in training
 * pattern 1 so the next modeset can retrain the link from a known state.
 */
static void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	/* Refresh RX BPC (bits 18:16) from PIPECONF's BPC field (bits 7:5) */
	temp &= ~(0x7 << 16);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	/* CPT PCHs use a different training-pattern field layout */
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}
5522
5523 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
5524 {
5525         struct drm_crtc *crtc;
5526         bool cleanup_done;
5527
5528         drm_for_each_crtc(crtc, &dev_priv->drm) {
5529                 struct drm_crtc_commit *commit;
5530                 spin_lock(&crtc->commit_lock);
5531                 commit = list_first_entry_or_null(&crtc->commit_list,
5532                                                   struct drm_crtc_commit, commit_entry);
5533                 cleanup_done = commit ?
5534                         try_wait_for_completion(&commit->cleanup_done) : true;
5535                 spin_unlock(&crtc->commit_lock);
5536
5537                 if (cleanup_done)
5538                         continue;
5539
5540                 drm_crtc_wait_one_vblank(crtc);
5541
5542                 return true;
5543         }
5544
5545         return false;
5546 }
5547
/*
 * Disable the iCLKIP (spread-spectrum pixel) clock: gate the pixel clock
 * first, then stop the SSC modulator via the SBI sideband interface.
 * Counterpart of lpt_program_iclkip().
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	/* Gate the pixel clock before touching the SSC modulator */
	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* sb_lock serializes all sideband (SBI) accesses */
	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
5562
/*
 * Program iCLKIP clock to the desired frequency.
 *
 * Disables iCLKIP, computes the integer divider, phase increment and
 * auxiliary divider that approximate @crtc_state's pixel clock from the
 * fixed virtual root clock, programs them over the SBI sideband, then
 * re-enables the modulator and ungates the pixel clock.
 */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in kHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to kHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		/* Total divisor = (divsel + 2) * 64 + phaseinc, halved per auxdiv step */
		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	drm_dbg_kms(&dev_priv->drm,
		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		    clock, auxdiv, divsel, phasedir, phaseinc);

	/* All SBI sideband traffic below must hold sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
5638
/*
 * Read back the currently programmed iCLKIP frequency in kHz, inverting
 * the divisor calculation done by lpt_program_iclkip().  Returns 0 when
 * the pixel clock is gated or the SSC modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	/* Pixel clock gated -> iCLKIP is effectively off */
	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* Inverse of the divisor split done when programming iCLKIP */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
5675
/*
 * Copy the CPU transcoder's timing registers (h/v total, blank, sync and
 * vsyncshift) into the corresponding PCH transcoder registers, so the PCH
 * transcoder runs with identical timings.
 */
static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
					   enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
		       intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
		       intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
		       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));

	intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
		       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
		       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
		       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
}
5699
/*
 * Set or clear the FDI B/C lane bifurcation chicken bit.  No-op if the
 * bit already has the requested value; otherwise both FDI B and C
 * receivers must be disabled before the bit may be flipped (WARNed on).
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	/* The bifurcation bit must not change while FDI B/C RX is running */
	WARN_ON(intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}
5720
5721 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5722 {
5723         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5724         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5725
5726         switch (crtc->pipe) {
5727         case PIPE_A:
5728                 break;
5729         case PIPE_B:
5730                 if (crtc_state->fdi_lanes > 2)
5731                         cpt_set_fdi_bc_bifurcation(dev_priv, false);
5732                 else
5733                         cpt_set_fdi_bc_bifurcation(dev_priv, true);
5734
5735                 break;
5736         case PIPE_C:
5737                 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5738
5739                 break;
5740         default:
5741                 BUG();
5742         }
5743 }
5744
5745 /*
5746  * Finds the encoder associated with the given CRTC. This can only be
5747  * used when we know that the CRTC isn't feeding multiple encoders!
5748  */
5749 static struct intel_encoder *
5750 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5751                            const struct intel_crtc_state *crtc_state)
5752 {
5753         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5754         const struct drm_connector_state *connector_state;
5755         const struct drm_connector *connector;
5756         struct intel_encoder *encoder = NULL;
5757         int num_encoders = 0;
5758         int i;
5759
5760         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5761                 if (connector_state->crtc != &crtc->base)
5762                         continue;
5763
5764                 encoder = to_intel_encoder(connector_state->best_encoder);
5765                 num_encoders++;
5766         }
5767
5768         drm_WARN(encoder->base.dev, num_encoders != 1,
5769                  "%d encoders for pipe %c\n",
5770                  num_encoders, pipe_name(crtc->pipe));
5771
5772         return encoder;
5773 }
5774
5775 /*
5776  * Enable PCH resources required for PCH ports:
5777  *   - PCH PLLs
5778  *   - FDI training & RX/TX
5779  *   - update transcoder timings
5780  *   - DP transcoding bits
5781  *   - transcoder
5782  */
5783 static void ilk_pch_enable(const struct intel_atomic_state *state,
5784                            const struct intel_crtc_state *crtc_state)
5785 {
5786         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5787         struct drm_device *dev = crtc->base.dev;
5788         struct drm_i915_private *dev_priv = to_i915(dev);
5789         enum pipe pipe = crtc->pipe;
5790         u32 temp;
5791
5792         assert_pch_transcoder_disabled(dev_priv, pipe);
5793
5794         if (IS_IVYBRIDGE(dev_priv))
5795                 ivb_update_fdi_bc_bifurcation(crtc_state);
5796
5797         /* Write the TU size bits before fdi link training, so that error
5798          * detection works. */
5799         intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
5800                        intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
5801
5802         /* For PCH output, training FDI link */
5803         dev_priv->display.fdi_link_train(crtc, crtc_state);
5804
5805         /* We need to program the right clock selection before writing the pixel
5806          * mutliplier into the DPLL. */
5807         if (HAS_PCH_CPT(dev_priv)) {
5808                 u32 sel;
5809
5810                 temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
5811                 temp |= TRANS_DPLL_ENABLE(pipe);
5812                 sel = TRANS_DPLLB_SEL(pipe);
5813                 if (crtc_state->shared_dpll ==
5814                     intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
5815                         temp |= sel;
5816                 else
5817                         temp &= ~sel;
5818                 intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
5819         }
5820
5821         /* XXX: pch pll's can be enabled any time before we enable the PCH
5822          * transcoder, and we actually should do this to not upset any PCH
5823          * transcoder that already use the clock when we share it.
5824          *
5825          * Note that enable_shared_dpll tries to do the right thing, but
5826          * get_shared_dpll unconditionally resets the pll - we need that to have
5827          * the right LVDS enable sequence. */
5828         intel_enable_shared_dpll(crtc_state);
5829
5830         /* set transcoder timing, panel must allow it */
5831         assert_panel_unlocked(dev_priv, pipe);
5832         ilk_pch_transcoder_set_timings(crtc_state, pipe);
5833
5834         intel_fdi_normal_train(crtc);
5835
5836         /* For PCH DP, enable TRANS_DP_CTL */
5837         if (HAS_PCH_CPT(dev_priv) &&
5838             intel_crtc_has_dp_encoder(crtc_state)) {
5839                 const struct drm_display_mode *adjusted_mode =
5840                         &crtc_state->hw.adjusted_mode;
5841                 u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
5842                 i915_reg_t reg = TRANS_DP_CTL(pipe);
5843                 enum port port;
5844
5845                 temp = intel_de_read(dev_priv, reg);
5846                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
5847                           TRANS_DP_SYNC_MASK |
5848                           TRANS_DP_BPC_MASK);
5849                 temp |= TRANS_DP_OUTPUT_ENABLE;
5850                 temp |= bpc << 9; /* same format but at 11:9 */
5851
5852                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
5853                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
5854                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
5855                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
5856
5857                 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
5858                 WARN_ON(port < PORT_B || port > PORT_D);
5859                 temp |= TRANS_DP_PORT_SEL(port);
5860
5861                 intel_de_write(dev_priv, reg, temp);
5862         }
5863
5864         ilk_enable_pch_transcoder(crtc_state);
5865 }
5866
/*
 * Enable the LPT PCH side for @crtc_state: program iCLKIP to the mode's
 * pixel clock, copy the CPU transcoder timings to the (single) PCH
 * transcoder A, then enable the PCH transcoder.
 */
void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* LPT only has one PCH transcoder, hard-wired as PIPE_A */
	assert_pch_transcoder_disabled(dev_priv, PIPE_A);

	lpt_program_iclkip(crtc_state);

	/* Set transcoder timing. */
	ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
5882
/*
 * Sanity-check a CPT modeset by watching the pipe's scanline counter
 * (PIPEDSL): if it does not move within two 5 ms waits, the pipe is
 * stuck and an error is logged.
 */
static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = intel_de_read(dev_priv, dslreg);
	udelay(500);
	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
		/* Retry once before declaring the pipe stuck */
		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
			drm_err(&dev_priv->drm,
				"mode set failed: pipe %c stuck\n",
				pipe_name(pipe));
	}
}
5898
5899 /*
5900  * The hardware phase 0.0 refers to the center of the pixel.
5901  * We want to start from the top/left edge which is phase
5902  * -0.5. That matches how the hardware calculates the scaling
5903  * factors (from top-left of the first pixel to bottom-right
5904  * of the last pixel, as opposed to the pixel centers).
5905  *
5906  * For 4:2:0 subsampled chroma planes we obviously have to
5907  * adjust that so that the chroma sample position lands in
5908  * the right spot.
5909  *
5910  * Note that for packed YCbCr 4:2:2 formats there is no way to
5911  * control chroma siting. The hardware simply replicates the
5912  * chroma samples for both of the luma samples, and thus we don't
5913  * actually get the expected MPEG2 chroma siting convention :(
5914  * The same behaviour is observed on pre-SKL platforms as well.
5915  *
5916  * Theory behind the formula (note that we ignore sub-pixel
5917  * source coordinates):
5918  * s = source sample position
5919  * d = destination sample position
5920  *
5921  * Downscaling 4:1:
5922  * -0.5
5923  * | 0.0
5924  * | |     1.5 (initial phase)
5925  * | |     |
5926  * v v     v
5927  * | s | s | s | s |
5928  * |       d       |
5929  *
5930  * Upscaling 1:4:
5931  * -0.5
5932  * | -0.375 (initial phase)
5933  * | |     0.0
5934  * | |     |
5935  * v v     v
5936  * |       s       |
5937  * | d | d | d | d |
5938  */
5939 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5940 {
5941         int phase = -0x8000;
5942         u16 trip = 0;
5943
5944         if (chroma_cosited)
5945                 phase += (sub - 1) * 0x8000 / sub;
5946
5947         phase += scale / (2 * sub);
5948
5949         /*
5950          * Hardware initial phase limited to [-0.5:1.5].
5951          * Since the max hardware scale factor is 3.0, we
5952          * should never actually excdeed 1.0 here.
5953          */
5954         WARN_ON(phase < -0x8000 || phase > 0x18000);
5955
5956         if (phase < 0)
5957                 phase = 0x10000 + phase;
5958         else
5959                 trip = PS_PHASE_TRIP;
5960
5961         return ((phase >> 2) & PS_PHASE_MASK) | trip;
5962 }
5963
5964 #define SKL_MIN_SRC_W 8
5965 #define SKL_MAX_SRC_W 4096
5966 #define SKL_MIN_SRC_H 8
5967 #define SKL_MAX_SRC_H 4096
5968 #define SKL_MIN_DST_W 8
5969 #define SKL_MAX_DST_W 4096
5970 #define SKL_MIN_DST_H 8
5971 #define SKL_MAX_DST_H 4096
5972 #define ICL_MAX_SRC_W 5120
5973 #define ICL_MAX_SRC_H 4096
5974 #define ICL_MAX_DST_W 5120
5975 #define ICL_MAX_DST_H 4096
5976 #define SKL_MIN_YUV_420_SRC_W 16
5977 #define SKL_MIN_YUV_420_SRC_H 16
5978
/*
 * Stage a scaler request (or release) for one scaler user (the CRTC or a
 * plane) in @crtc_state.
 *
 * Decides whether scaling is needed (src != dst size, or @need_scaler
 * already forced by the caller), validates the request against mode and
 * size constraints, and updates scaler_state->scaler_users accordingly.
 * Actual scaler register programming happens later during plane/panel-fit
 * programming.
 *
 * Returns 0 on success or -EINVAL if the requested scaling cannot be
 * supported.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  const struct drm_format_info *format,
		  u64 modifier, bool need_scaler)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	if (src_w != dst_w || src_h != dst_h)
		need_scaler = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		drm_dbg_kms(&dev_priv->drm,
			    "Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler binded to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaler) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			drm_dbg_kms(&dev_priv->drm,
				    "scaler_user index %u.%u: "
				    "Staged freeing scaler id %d scaler_users = 0x%x\n",
				    intel_crtc->pipe, scaler_user, *scaler_id,
				    scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* Planar (semi-planar YUV) sources have a larger minimum size */
	if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Planar YUV: src dimensions not met\n");
		return -EINVAL;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
	    (INTEL_GEN(dev_priv) >= 11 &&
	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
	    (INTEL_GEN(dev_priv) < 11 &&
	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
		drm_dbg_kms(&dev_priv->drm,
			    "scaler_user index %u.%u: src %ux%u dst %ux%u "
			    "size is out of scaler range\n",
			    intel_crtc->pipe, scaler_user, src_w, src_h,
			    dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
		    "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		    intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		    scaler_state->scaler_users);

	return 0;
}
6073
6074 /**
6075  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
6076  *
6077  * @state: crtc's scaler state
6078  *
6079  * Return
6080  *     0 - scaler_usage updated successfully
6081  *    error - requested scaling cannot be supported or other error condition
6082  */
6083 int skl_update_scaler_crtc(struct intel_crtc_state *state)
6084 {
6085         const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
6086         bool need_scaler = false;
6087
6088         if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
6089             state->pch_pfit.enabled)
6090                 need_scaler = true;
6091
6092         return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
6093                                  &state->scaler_state.scaler_id,
6094                                  state->pipe_src_w, state->pipe_src_h,
6095                                  adjusted_mode->crtc_hdisplay,
6096                                  adjusted_mode->crtc_vdisplay, NULL, 0,
6097                                  need_scaler);
6098 }
6099
6100 /**
6101  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
6102  * @crtc_state: crtc's scaler state
6103  * @plane_state: atomic plane state to update
6104  *
6105  * Return
6106  *     0 - scaler_usage updated successfully
6107  *    error - requested scaling cannot be supported or other error condition
6108  */
6109 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
6110                                    struct intel_plane_state *plane_state)
6111 {
6112         struct intel_plane *intel_plane =
6113                 to_intel_plane(plane_state->uapi.plane);
6114         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
6115         struct drm_framebuffer *fb = plane_state->hw.fb;
6116         int ret;
6117         bool force_detach = !fb || !plane_state->uapi.visible;
6118         bool need_scaler = false;
6119
6120         /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
6121         if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
6122             fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
6123                 need_scaler = true;
6124
6125         ret = skl_update_scaler(crtc_state, force_detach,
6126                                 drm_plane_index(&intel_plane->base),
6127                                 &plane_state->scaler_id,
6128                                 drm_rect_width(&plane_state->uapi.src) >> 16,
6129                                 drm_rect_height(&plane_state->uapi.src) >> 16,
6130                                 drm_rect_width(&plane_state->uapi.dst),
6131                                 drm_rect_height(&plane_state->uapi.dst),
6132                                 fb ? fb->format : NULL,
6133                                 fb ? fb->modifier : 0,
6134                                 need_scaler);
6135
6136         if (ret || plane_state->scaler_id < 0)
6137                 return ret;
6138
6139         /* check colorkey */
6140         if (plane_state->ckey.flags) {
6141                 drm_dbg_kms(&dev_priv->drm,
6142                             "[PLANE:%d:%s] scaling with color key not allowed",
6143                             intel_plane->base.base.id,
6144                             intel_plane->base.name);
6145                 return -EINVAL;
6146         }
6147
6148         /* Check src format */
6149         switch (fb->format->format) {
6150         case DRM_FORMAT_RGB565:
6151         case DRM_FORMAT_XBGR8888:
6152         case DRM_FORMAT_XRGB8888:
6153         case DRM_FORMAT_ABGR8888:
6154         case DRM_FORMAT_ARGB8888:
6155         case DRM_FORMAT_XRGB2101010:
6156         case DRM_FORMAT_XBGR2101010:
6157         case DRM_FORMAT_ARGB2101010:
6158         case DRM_FORMAT_ABGR2101010:
6159         case DRM_FORMAT_YUYV:
6160         case DRM_FORMAT_YVYU:
6161         case DRM_FORMAT_UYVY:
6162         case DRM_FORMAT_VYUY:
6163         case DRM_FORMAT_NV12:
6164         case DRM_FORMAT_P010:
6165         case DRM_FORMAT_P012:
6166         case DRM_FORMAT_P016:
6167         case DRM_FORMAT_Y210:
6168         case DRM_FORMAT_Y212:
6169         case DRM_FORMAT_Y216:
6170         case DRM_FORMAT_XVYU2101010:
6171         case DRM_FORMAT_XVYU12_16161616:
6172         case DRM_FORMAT_XVYU16161616:
6173                 break;
6174         case DRM_FORMAT_XBGR16161616F:
6175         case DRM_FORMAT_ABGR16161616F:
6176         case DRM_FORMAT_XRGB16161616F:
6177         case DRM_FORMAT_ARGB16161616F:
6178                 if (INTEL_GEN(dev_priv) >= 11)
6179                         break;
6180                 /* fall through */
6181         default:
6182                 drm_dbg_kms(&dev_priv->drm,
6183                             "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
6184                             intel_plane->base.base.id, intel_plane->base.name,
6185                             fb->base.id, fb->format->format);
6186                 return -EINVAL;
6187         }
6188
6189         return 0;
6190 }
6191
6192 void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
6193 {
6194         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
6195         int i;
6196
6197         for (i = 0; i < crtc->num_scalers; i++)
6198                 skl_detach_scaler(crtc, i);
6199 }
6200
/*
 * Program a pipe scaler as the panel fitter on SKL+. No-op unless the pch
 * panel fitter is enabled in @crtc_state; the scaler id must already have
 * been assigned (by the atomic check phase).
 */
static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        const struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;

        if (crtc_state->pch_pfit.enabled) {
                u16 uv_rgb_hphase, uv_rgb_vphase;
                int pfit_w, pfit_h, hscale, vscale;
                int id;

                /* Atomic check should have reserved a scaler for the pfit. */
                if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
                        return;

                /* pch_pfit.size packs width in the high, height in the low 16 bits. */
                pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
                pfit_h = crtc_state->pch_pfit.size & 0xFFFF;

                /* Scale ratios in .16 fixed point: pipe source size vs. pfit window. */
                hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
                vscale = (crtc_state->pipe_src_h << 16) / pfit_h;

                uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
                uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

                id = scaler_state->scaler_id;
                intel_de_write(dev_priv, SKL_PS_CTRL(pipe, id),
                               PS_SCALER_EN | PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
                intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
                                  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
                intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
                                  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
                intel_de_write(dev_priv, SKL_PS_WIN_POS(pipe, id),
                               crtc_state->pch_pfit.pos);
                intel_de_write(dev_priv, SKL_PS_WIN_SZ(pipe, id),
                               crtc_state->pch_pfit.size);
        }
}
6239
/*
 * Program the ILK-style panel fitter (PF) for the crtc's pipe. No-op unless
 * the pch panel fitter is enabled in @crtc_state.
 */
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        if (crtc_state->pch_pfit.enabled) {
                /* Force use of hard-coded filter coefficients
                 * as some pre-programmed values are broken,
                 * e.g. x201.
                 */
                if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
                        /* IVB/HSW additionally require the pipe select bit. */
                        intel_de_write(dev_priv, PF_CTL(pipe),
                                       PF_ENABLE | PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
                else
                        intel_de_write(dev_priv, PF_CTL(pipe),
                                       PF_ENABLE | PF_FILTER_MED_3x3);
                intel_de_write(dev_priv, PF_WIN_POS(pipe),
                               crtc_state->pch_pfit.pos);
                intel_de_write(dev_priv, PF_WIN_SZ(pipe),
                               crtc_state->pch_pfit.size);
        }
}
6263
/*
 * Enable IPS for the crtc if the crtc state asks for it. On Broadwell the
 * enable goes through the pcode mailbox; on other platforms it is done by
 * writing IPS_CTL directly.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        /*
         * We can only enable IPS after we enable a plane and wait for a vblank
         * This function is called from post_plane_update, which is run after
         * a vblank wait.
         */
        WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

        if (IS_BROADWELL(dev_priv)) {
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
                                                IPS_ENABLE | IPS_PCODE_CONTROL));
                /* Quoting Art Runyan: "its not safe to expect any particular
                 * value in IPS_CTL bit 31 after enabling IPS through the
                 * mailbox." Moreover, the mailbox may return a bogus state,
                 * so we need to just enable it and continue on.
                 */
        } else {
                intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
                /* The bit only becomes 1 in the next vblank, so this wait here
                 * is essentially intel_wait_for_vblank. If we don't have this
                 * and don't wait for vblanks until the end of crtc_enable, then
                 * the HW state readout code will complain that the expected
                 * IPS_CTL value is not the one we read. */
                if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
                        drm_err(&dev_priv->drm,
                                "Timed out waiting for IPS enable\n");
        }
}
6300
/*
 * Disable IPS for the crtc if the crtc state has it enabled. Uses the pcode
 * mailbox on Broadwell, IPS_CTL elsewhere, and always ends with a vblank
 * wait before planes may be disabled.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        if (IS_BROADWELL(dev_priv)) {
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
                /*
                 * Wait for PCODE to finish disabling IPS. The BSpec specified
                 * 42ms timeout value leads to occasional timeouts so use 100ms
                 * instead.
                 */
                if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
                        drm_err(&dev_priv->drm,
                                "Timed out waiting for IPS disable\n");
        } else {
                intel_de_write(dev_priv, IPS_CTL, 0);
                /* Posting read flushes the disable to the hardware. */
                intel_de_posting_read(dev_priv, IPS_CTL);
        }

        /* We need to wait for a vblank before we can disable the plane. */
        intel_wait_for_vblank(dev_priv, crtc->pipe);
}
6328
6329 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
6330 {
6331         if (intel_crtc->overlay)
6332                 (void) intel_overlay_switch_off(intel_crtc->overlay);
6333
6334         /* Let userspace switch the overlay on again. In most cases userspace
6335          * has to recompute where to put it anyway.
6336          */
6337 }
6338
6339 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
6340                                        const struct intel_crtc_state *new_crtc_state)
6341 {
6342         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6343         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6344
6345         if (!old_crtc_state->ips_enabled)
6346                 return false;
6347
6348         if (needs_modeset(new_crtc_state))
6349                 return true;
6350
6351         /*
6352          * Workaround : Do not read or write the pipe palette/gamma data while
6353          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6354          *
6355          * Disable IPS before we program the LUT.
6356          */
6357         if (IS_HASWELL(dev_priv) &&
6358             (new_crtc_state->uapi.color_mgmt_changed ||
6359              new_crtc_state->update_pipe) &&
6360             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6361                 return true;
6362
6363         return !new_crtc_state->ips_enabled;
6364 }
6365
6366 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
6367                                        const struct intel_crtc_state *new_crtc_state)
6368 {
6369         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6370         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6371
6372         if (!new_crtc_state->ips_enabled)
6373                 return false;
6374
6375         if (needs_modeset(new_crtc_state))
6376                 return true;
6377
6378         /*
6379          * Workaround : Do not read or write the pipe palette/gamma data while
6380          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6381          *
6382          * Re-enable IPS after the LUT has been programmed.
6383          */
6384         if (IS_HASWELL(dev_priv) &&
6385             (new_crtc_state->uapi.color_mgmt_changed ||
6386              new_crtc_state->update_pipe) &&
6387             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6388                 return true;
6389
6390         /*
6391          * We can't read out IPS on broadwell, assume the worst and
6392          * forcibly enable IPS on the first fastset.
6393          */
6394         if (new_crtc_state->update_pipe &&
6395             old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
6396                 return true;
6397
6398         return !old_crtc_state->ips_enabled;
6399 }
6400
6401 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
6402 {
6403         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6404
6405         if (!crtc_state->nv12_planes)
6406                 return false;
6407
6408         /* WA Display #0827: Gen9:all */
6409         if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
6410                 return true;
6411
6412         return false;
6413 }
6414
6415 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
6416 {
6417         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6418
6419         /* Wa_2006604312:icl */
6420         if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
6421                 return true;
6422
6423         return false;
6424 }
6425
6426 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
6427                             const struct intel_crtc_state *new_crtc_state)
6428 {
6429         return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) &&
6430                 new_crtc_state->active_planes;
6431 }
6432
6433 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
6434                              const struct intel_crtc_state *new_crtc_state)
6435 {
6436         return old_crtc_state->active_planes &&
6437                 (!new_crtc_state->active_planes || needs_modeset(new_crtc_state));
6438 }
6439
/*
 * Per-crtc work run after the plane update has been committed: frontbuffer
 * flip notification, optimal watermarks, IPS re-enable, FBC post-update,
 * and tearing down workarounds that are no longer needed.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
                                    struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        enum pipe pipe = crtc->pipe;

        intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

        if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
                intel_update_watermarks(crtc);

        if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
                hsw_enable_ips(new_crtc_state);

        intel_fbc_post_update(state, crtc);

        /* Display WA 827: disable once no longer required */
        if (needs_nv12_wa(old_crtc_state) &&
            !needs_nv12_wa(new_crtc_state))
                skl_wa_827(dev_priv, pipe, false);

        /* Wa_2006604312:icl: disable once no longer required */
        if (needs_scalerclk_wa(old_crtc_state) &&
            !needs_scalerclk_wa(new_crtc_state))
                icl_wa_scalerclkgating(dev_priv, pipe, false);
}
6468
/*
 * Per-crtc preparation run before the plane update is committed: IPS
 * disable, FBC pre-update, enabling workarounds that must be active before
 * the update, self-refresh/LP watermark handling, intermediate watermarks,
 * and gen2 underrun-reporting suppression. Several of these steps need a
 * vblank wait to take effect.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
                                   struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        enum pipe pipe = crtc->pipe;

        if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
                hsw_disable_ips(old_crtc_state);

        if (intel_fbc_pre_update(state, crtc))
                intel_wait_for_vblank(dev_priv, pipe);

        /* Display WA 827 */
        if (!needs_nv12_wa(old_crtc_state) &&
            needs_nv12_wa(new_crtc_state))
                skl_wa_827(dev_priv, pipe, true);

        /* Wa_2006604312:icl */
        if (!needs_scalerclk_wa(old_crtc_state) &&
            needs_scalerclk_wa(new_crtc_state))
                icl_wa_scalerclkgating(dev_priv, pipe, true);

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
            new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, pipe);

        /*
         * IVB workaround: must disable low power watermarks for at least
         * one frame before enabling scaling.  LP watermarks can be re-enabled
         * when scaling is disabled.
         *
         * WaCxSRDisabledForSpriteScaling:ivb
         */
        if (old_crtc_state->hw.active &&
            new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
                intel_wait_for_vblank(dev_priv, pipe);

        /*
         * If we're doing a modeset we don't need to do any
         * pre-vblank watermark programming here.
         */
        if (!needs_modeset(new_crtc_state)) {
                /*
                 * For platforms that support atomic watermarks, program the
                 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
                 * will be the intermediate values that are safe for both pre- and
                 * post- vblank; when vblank happens, the 'active' values will be set
                 * to the final 'target' values and we'll do this again to get the
                 * optimal watermarks.  For gen9+ platforms, the values we program here
                 * will be the final target values which will get automatically latched
                 * at vblank time; no further programming will be necessary.
                 *
                 * If a platform hasn't been transitioned to atomic watermarks yet,
                 * we'll continue to update watermarks the old way, if flags tell
                 * us to.
                 */
                if (dev_priv->display.initial_watermarks)
                        dev_priv->display.initial_watermarks(state, crtc);
                else if (new_crtc_state->update_wm_pre)
                        intel_update_watermarks(crtc);
        }

        /*
         * Gen2 reports pipe underruns whenever all planes are disabled.
         * So disable underrun reporting before all the planes get disabled.
         *
         * We do this after .initial_watermarks() so that we have a
         * chance of catching underruns with the intermediate watermarks
         * vs. the old plane configuration.
         */
        if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
6555
/*
 * Disable every plane of @crtc selected by the new crtc state's
 * update_planes mask, then signal a frontbuffer flip for the planes that
 * were visible before.
 */
static void intel_crtc_disable_planes(struct intel_atomic_state *state,
                                      struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        unsigned int update_mask = new_crtc_state->update_planes;
        const struct intel_plane_state *old_plane_state;
        struct intel_plane *plane;
        unsigned fb_bits = 0;
        int i;

        intel_crtc_dpms_overlay_disable(crtc);

        for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
                /* Only touch planes on this pipe that the mask selects. */
                if (crtc->pipe != plane->pipe ||
                    !(update_mask & BIT(plane->id)))
                        continue;

                intel_disable_plane(plane, new_crtc_state);

                /* Collect frontbuffer bits of planes that were showing. */
                if (old_plane_state->uapi.visible)
                        fb_bits |= plane->frontbuffer_bit;
        }

        intel_frontbuffer_flip(dev_priv, fb_bits);
}
6583
6584 /*
6585  * intel_connector_primary_encoder - get the primary encoder for a connector
6586  * @connector: connector for which to return the encoder
6587  *
6588  * Returns the primary encoder for a connector. There is a 1:1 mapping from
6589  * all connectors to their encoder, except for DP-MST connectors which have
6590  * both a virtual and a primary encoder. These DP-MST primary encoders can be
6591  * pointed to by as many DP-MST connectors as there are pipes.
6592  */
6593 static struct intel_encoder *
6594 intel_connector_primary_encoder(struct intel_connector *connector)
6595 {
6596         struct intel_encoder *encoder;
6597
6598         if (connector->mst_port)
6599                 return &dp_to_dig_port(connector->mst_port)->base;
6600
6601         encoder = intel_attached_encoder(connector);
6602         WARN_ON(!encoder);
6603
6604         return encoder;
6605 }
6606
6607 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
6608 {
6609         struct drm_connector_state *new_conn_state;
6610         struct drm_connector *connector;
6611         int i;
6612
6613         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
6614                                         i) {
6615                 struct intel_connector *intel_connector;
6616                 struct intel_encoder *encoder;
6617                 struct intel_crtc *crtc;
6618
6619                 if (!intel_connector_needs_modeset(state, connector))
6620                         continue;
6621
6622                 intel_connector = to_intel_connector(connector);
6623                 encoder = intel_connector_primary_encoder(intel_connector);
6624                 if (!encoder->update_prepare)
6625                         continue;
6626
6627                 crtc = new_conn_state->crtc ?
6628                         to_intel_crtc(new_conn_state->crtc) : NULL;
6629                 encoder->update_prepare(state, encoder, crtc);
6630         }
6631 }
6632
6633 static void intel_encoders_update_complete(struct intel_atomic_state *state)
6634 {
6635         struct drm_connector_state *new_conn_state;
6636         struct drm_connector *connector;
6637         int i;
6638
6639         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
6640                                         i) {
6641                 struct intel_connector *intel_connector;
6642                 struct intel_encoder *encoder;
6643                 struct intel_crtc *crtc;
6644
6645                 if (!intel_connector_needs_modeset(state, connector))
6646                         continue;
6647
6648                 intel_connector = to_intel_connector(connector);
6649                 encoder = intel_connector_primary_encoder(intel_connector);
6650                 if (!encoder->update_complete)
6651                         continue;
6652
6653                 crtc = new_conn_state->crtc ?
6654                         to_intel_crtc(new_conn_state->crtc) : NULL;
6655                 encoder->update_complete(state, encoder, crtc);
6656         }
6657 }
6658
6659 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
6660                                           struct intel_crtc *crtc)
6661 {
6662         const struct intel_crtc_state *crtc_state =
6663                 intel_atomic_get_new_crtc_state(state, crtc);
6664         const struct drm_connector_state *conn_state;
6665         struct drm_connector *conn;
6666         int i;
6667
6668         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6669                 struct intel_encoder *encoder =
6670                         to_intel_encoder(conn_state->best_encoder);
6671
6672                 if (conn_state->crtc != &crtc->base)
6673                         continue;
6674
6675                 if (encoder->pre_pll_enable)
6676                         encoder->pre_pll_enable(encoder, crtc_state, conn_state);
6677         }
6678 }
6679
6680 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
6681                                       struct intel_crtc *crtc)
6682 {
6683         const struct intel_crtc_state *crtc_state =
6684                 intel_atomic_get_new_crtc_state(state, crtc);
6685         const struct drm_connector_state *conn_state;
6686         struct drm_connector *conn;
6687         int i;
6688
6689         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6690                 struct intel_encoder *encoder =
6691                         to_intel_encoder(conn_state->best_encoder);
6692
6693                 if (conn_state->crtc != &crtc->base)
6694                         continue;
6695
6696                 if (encoder->pre_enable)
6697                         encoder->pre_enable(encoder, crtc_state, conn_state);
6698         }
6699 }
6700
6701 static void intel_encoders_enable(struct intel_atomic_state *state,
6702                                   struct intel_crtc *crtc)
6703 {
6704         const struct intel_crtc_state *crtc_state =
6705                 intel_atomic_get_new_crtc_state(state, crtc);
6706         const struct drm_connector_state *conn_state;
6707         struct drm_connector *conn;
6708         int i;
6709
6710         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6711                 struct intel_encoder *encoder =
6712                         to_intel_encoder(conn_state->best_encoder);
6713
6714                 if (conn_state->crtc != &crtc->base)
6715                         continue;
6716
6717                 if (encoder->enable)
6718                         encoder->enable(encoder, crtc_state, conn_state);
6719                 intel_opregion_notify_encoder(encoder, true);
6720         }
6721 }
6722
6723 static void intel_encoders_disable(struct intel_atomic_state *state,
6724                                    struct intel_crtc *crtc)
6725 {
6726         const struct intel_crtc_state *old_crtc_state =
6727                 intel_atomic_get_old_crtc_state(state, crtc);
6728         const struct drm_connector_state *old_conn_state;
6729         struct drm_connector *conn;
6730         int i;
6731
6732         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6733                 struct intel_encoder *encoder =
6734                         to_intel_encoder(old_conn_state->best_encoder);
6735
6736                 if (old_conn_state->crtc != &crtc->base)
6737                         continue;
6738
6739                 intel_opregion_notify_encoder(encoder, false);
6740                 if (encoder->disable)
6741                         encoder->disable(encoder, old_crtc_state, old_conn_state);
6742         }
6743 }
6744
6745 static void intel_encoders_post_disable(struct intel_atomic_state *state,
6746                                         struct intel_crtc *crtc)
6747 {
6748         const struct intel_crtc_state *old_crtc_state =
6749                 intel_atomic_get_old_crtc_state(state, crtc);
6750         const struct drm_connector_state *old_conn_state;
6751         struct drm_connector *conn;
6752         int i;
6753
6754         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6755                 struct intel_encoder *encoder =
6756                         to_intel_encoder(old_conn_state->best_encoder);
6757
6758                 if (old_conn_state->crtc != &crtc->base)
6759                         continue;
6760
6761                 if (encoder->post_disable)
6762                         encoder->post_disable(encoder, old_crtc_state, old_conn_state);
6763         }
6764 }
6765
6766 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
6767                                             struct intel_crtc *crtc)
6768 {
6769         const struct intel_crtc_state *old_crtc_state =
6770                 intel_atomic_get_old_crtc_state(state, crtc);
6771         const struct drm_connector_state *old_conn_state;
6772         struct drm_connector *conn;
6773         int i;
6774
6775         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6776                 struct intel_encoder *encoder =
6777                         to_intel_encoder(old_conn_state->best_encoder);
6778
6779                 if (old_conn_state->crtc != &crtc->base)
6780                         continue;
6781
6782                 if (encoder->post_pll_disable)
6783                         encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
6784         }
6785 }
6786
6787 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
6788                                        struct intel_crtc *crtc)
6789 {
6790         const struct intel_crtc_state *crtc_state =
6791                 intel_atomic_get_new_crtc_state(state, crtc);
6792         const struct drm_connector_state *conn_state;
6793         struct drm_connector *conn;
6794         int i;
6795
6796         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6797                 struct intel_encoder *encoder =
6798                         to_intel_encoder(conn_state->best_encoder);
6799
6800                 if (conn_state->crtc != &crtc->base)
6801                         continue;
6802
6803                 if (encoder->update_pipe)
6804                         encoder->update_pipe(encoder, crtc_state, conn_state);
6805         }
6806 }
6807
6808 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6809 {
6810         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6811         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6812
6813         plane->disable_plane(plane, crtc_state);
6814 }
6815
/*
 * Enable a pipe on ILK-IVB PCH platforms. The statement order follows the
 * documented mode set sequence (FDI PLL before pipe, LUTs before pipe,
 * pipe before PCH transcoder) and must not be rearranged.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (WARN_ON(crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (new_crtc_state->has_pch_encoder)
		intel_prepare_shared_dpll(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	ilk_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ilk_fdi_pll_enable(new_crtc_state);
	} else {
		/* No PCH encoder -> FDI must already be fully off. */
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	/* Pipe is fully up, re-arm underrun reporting. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6907
6908 /* IPS only exists on ULT machines and is tied to pipe A. */
6909 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
6910 {
6911         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
6912 }
6913
6914 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6915                                             enum pipe pipe, bool apply)
6916 {
6917         u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
6918         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6919
6920         if (apply)
6921                 val |= mask;
6922         else
6923                 val &= ~mask;
6924
6925         intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
6926 }
6927
6928 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
6929 {
6930         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6931         enum pipe pipe = crtc->pipe;
6932         u32 val;
6933
6934         val = MBUS_DBOX_A_CREDIT(2);
6935
6936         if (INTEL_GEN(dev_priv) >= 12) {
6937                 val |= MBUS_DBOX_BW_CREDIT(2);
6938                 val |= MBUS_DBOX_B_CREDIT(12);
6939         } else {
6940                 val |= MBUS_DBOX_BW_CREDIT(1);
6941                 val |= MBUS_DBOX_B_CREDIT(8);
6942         }
6943
6944         intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
6945 }
6946
6947 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
6948 {
6949         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6950         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6951
6952         intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
6953                        HSW_LINETIME(crtc_state->linetime) |
6954                        HSW_IPS_LINETIME(crtc_state->ips_linetime));
6955 }
6956
6957 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
6958 {
6959         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6960         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6961         i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
6962         u32 val;
6963
6964         val = intel_de_read(dev_priv, reg);
6965         val &= ~HSW_FRAME_START_DELAY_MASK;
6966         val |= HSW_FRAME_START_DELAY(0);
6967         intel_de_write(dev_priv, reg, val);
6968 }
6969
/*
 * Enable a pipe on HSW+ (DDI) platforms. Follows the documented mode set
 * sequence: PLL and encoder pre-enable hooks first, then transcoder
 * timings/pfit/LUTs, then the transcoder function, watermarks and encoder
 * enable. The ordering is part of the hardware contract.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	if (WARN_ON(crtc->active))
		return;

	intel_encoders_pre_pll_enable(state, crtc);

	if (new_crtc_state->shared_dpll)
		intel_enable_shared_dpll(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	/* DSI transcoders program their own timings via the DSI encoder. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_enable_trans_port_sync(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder))
		intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
			       new_crtc_state->pixel_multiplier - 1);

	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	if (!transcoder_is_dsi(cpu_transcoder)) {
		hsw_set_frame_start_delay(new_crtc_state);
		hsw_set_pipeconf(new_crtc_state);
	}

	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (INTEL_GEN(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(crtc);

	intel_encoders_enable(state, crtc);

	/* Undo WA #1180 once the pipe has produced its first frame. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
7067
7068 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
7069 {
7070         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
7071         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7072         enum pipe pipe = crtc->pipe;
7073
7074         /* To avoid upsetting the power well on haswell only disable the pfit if
7075          * it's in use. The hw state code will make sure we get this right. */
7076         if (old_crtc_state->pch_pfit.enabled) {
7077                 intel_de_write(dev_priv, PF_CTL(pipe), 0);
7078                 intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
7079                 intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
7080         }
7081 }
7082
/*
 * Disable a pipe on ILK-IVB PCH platforms: encoders, pipe, pfit, FDI and
 * finally the PCH transcoder/PLL, in the documented order.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_fdi_disable(crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder) {
		ilk_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = intel_de_read(dev_priv, reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			intel_de_write(dev_priv, reg, temp);

			/* disable DPLL_SEL */
			temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
		}

		ilk_fdi_pll_disable(crtc);
	}

	/* Everything is off, re-arm underrun reporting. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
7139
/*
 * Disable a pipe on HSW+ (DDI) platforms. The actual pipe/transcoder
 * teardown happens inside the encoder ->disable()/->post_disable() hooks.
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	intel_encoders_disable(state, crtc);
	intel_encoders_post_disable(state, crtc);
}
7150
/* Enable the GMCH panel fitter with the precomputed state, if any. */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* Nothing to do if the pfit isn't to be used. */
	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* Ratios must be programmed before enabling via PFIT_CONTROL. */
	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
		       crtc_state->gmch_pfit.pgm_ratios);
	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
7174
7175 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
7176 {
7177         if (phy == PHY_NONE)
7178                 return false;
7179
7180         if (IS_ELKHARTLAKE(dev_priv))
7181                 return phy <= PHY_C;
7182
7183         if (INTEL_GEN(dev_priv) >= 11)
7184                 return phy <= PHY_B;
7185
7186         return false;
7187 }
7188
7189 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
7190 {
7191         if (INTEL_GEN(dev_priv) >= 12)
7192                 return phy >= PHY_D && phy <= PHY_I;
7193
7194         if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
7195                 return phy >= PHY_C && phy <= PHY_F;
7196
7197         return false;
7198 }
7199
7200 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
7201 {
7202         if (IS_ELKHARTLAKE(i915) && port == PORT_D)
7203                 return PHY_A;
7204
7205         return (enum phy)port;
7206 }
7207
7208 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
7209 {
7210         if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
7211                 return PORT_TC_NONE;
7212
7213         if (INTEL_GEN(dev_priv) >= 12)
7214                 return port - PORT_D;
7215
7216         return port - PORT_C;
7217 }
7218
/*
 * Map a DDI port to the power domain covering its lanes. Unknown ports
 * are logged via MISSING_CASE and fall back to POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
7241
/*
 * Map a digital port's AUX channel to its power domain. Type-C ports in
 * TBT-alt mode use the dedicated *_TBT AUX domains; everything else uses
 * the regular per-channel AUX domains. Unknown channels are logged via
 * MISSING_CASE and fall back to a sane default.
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_tc(dev_priv, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	switch (dig_port->aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	case AUX_CH_G:
		return POWER_DOMAIN_AUX_G;
	default:
		MISSING_CASE(dig_port->aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}
7287
7288 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
7289 {
7290         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7291         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7292         struct drm_encoder *encoder;
7293         enum pipe pipe = crtc->pipe;
7294         u64 mask;
7295         enum transcoder transcoder = crtc_state->cpu_transcoder;
7296
7297         if (!crtc_state->hw.active)
7298                 return 0;
7299
7300         mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
7301         mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
7302         if (crtc_state->pch_pfit.enabled ||
7303             crtc_state->pch_pfit.force_thru)
7304                 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
7305
7306         drm_for_each_encoder_mask(encoder, &dev_priv->drm,
7307                                   crtc_state->uapi.encoder_mask) {
7308                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
7309
7310                 mask |= BIT_ULL(intel_encoder->power_domain);
7311         }
7312
7313         if (HAS_DDI(dev_priv) && crtc_state->has_audio)
7314                 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
7315
7316         if (crtc_state->shared_dpll)
7317                 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
7318
7319         return mask;
7320 }
7321
7322 static u64
7323 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
7324 {
7325         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7326         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7327         enum intel_display_power_domain domain;
7328         u64 domains, new_domains, old_domains;
7329
7330         old_domains = crtc->enabled_power_domains;
7331         crtc->enabled_power_domains = new_domains =
7332                 get_crtc_power_domains(crtc_state);
7333
7334         domains = new_domains & ~old_domains;
7335
7336         for_each_power_domain(domain, domains)
7337                 intel_display_power_get(dev_priv, domain);
7338
7339         return old_domains & ~new_domains;
7340 }
7341
7342 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
7343                                       u64 domains)
7344 {
7345         enum intel_display_power_domain domain;
7346
7347         for_each_power_domain(domain, domains)
7348                 intel_display_power_put_unchecked(dev_priv, domain);
7349 }
7350
/*
 * Enable a pipe on VLV/CHV: program timings and pipeconf, bring up the
 * DPLL via the platform-specific prepare/enable pair, then pfit, LUTs,
 * watermarks and finally the pipe and encoders, in this order.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (WARN_ON(crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		/* Pipe B on CHV uses the legacy blend mode for sprites. */
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(crtc, new_crtc_state);
		chv_enable_pll(crtc, new_crtc_state);
	} else {
		vlv_prepare_pll(crtc, new_crtc_state);
		vlv_enable_pll(crtc, new_crtc_state);
	}

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
7405
7406 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
7407 {
7408         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7409         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7410
7411         intel_de_write(dev_priv, FP0(crtc->pipe),
7412                        crtc_state->dpll_hw_state.fp0);
7413         intel_de_write(dev_priv, FP1(crtc->pipe),
7414                        crtc_state->dpll_hw_state.fp1);
7415 }
7416
/*
 * Enable a pipe on gen2-4 GMCH platforms: PLL dividers and timings first,
 * then the DPLL, pfit, LUTs and watermarks before enabling the pipe and
 * encoders.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (WARN_ON(crtc->active))
		return;

	i9xx_set_pll_dividers(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* Gen2 has no underrun interrupt support. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(crtc, new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	else
		intel_update_watermarks(crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
7464
7465 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
7466 {
7467         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
7468         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7469
7470         if (!old_crtc_state->gmch_pfit.control)
7471                 return;
7472
7473         assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
7474
7475         drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
7476                     intel_de_read(dev_priv, PFIT_CONTROL));
7477         intel_de_write(dev_priv, PFIT_CONTROL, 0);
7478 }
7479
/*
 * Disable a pipe on gen2-4 GMCH platforms: encoders, pipe, pfit, then the
 * platform-specific DPLL, keeping the documented teardown order.
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* DSI PLLs are managed by the DSI encoder itself. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	/* Gen2 has no underrun interrupt support. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
7526
/*
 * Force a crtc off outside of a normal atomic commit (e.g. during hw
 * state sanitization at load/resume): disable its planes and hardware,
 * then scrub all the software state tracking it — crtc state, encoder
 * links, power domains, cdclk and bandwidth bookkeeping.
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	u64 domains;
	int ret;

	if (!crtc_state->hw.active)
		return;

	/* Turn off any still-visible planes first. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	/*
	 * Build a throwaway atomic state just so the crtc_disable() hook
	 * has the old crtc/connector states to look at.
	 */
	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to disable [CRTC:%d:%s], out of memory",
			    crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	WARN_ON(IS_ERR(temp_crtc_state) || ret);

	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	drm_dbg_kms(&dev_priv->drm,
		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		    crtc->base.base.id, crtc->base.name);

	crtc->active = false;
	crtc->base.enabled = false;

	/* Reset the uapi and hw crtc state to "off". */
	WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(crtc_state);

	/* Drop every power reference the crtc was holding. */
	domains = crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
	crtc->enabled_power_domains = 0;

	/* Clear the per-pipe cdclk and bandwidth bookkeeping. */
	dev_priv->active_pipes &= ~BIT(pipe);
	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}
7610
7611 /*
7612  * turn all crtc's off, but do not adjust state
7613  * This has to be paired with a call to intel_modeset_setup_hw_state.
7614  */
7615 int intel_display_suspend(struct drm_device *dev)
7616 {
7617         struct drm_i915_private *dev_priv = to_i915(dev);
7618         struct drm_atomic_state *state;
7619         int ret;
7620
7621         state = drm_atomic_helper_suspend(dev);
7622         ret = PTR_ERR_OR_ZERO(state);
7623         if (ret)
7624                 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
7625                         ret);
7626         else
7627                 dev_priv->modeset_restore_state = state;
7628         return ret;
7629 }
7630
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	/* Recover the containing intel_encoder before the base is torn down. */
	struct intel_encoder *intel_enc = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);

	kfree(intel_enc);
}
7638
7639 /* Cross check the actual hw state with our own modeset state tracking (and it's
7640  * internal consistency). */
7641 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
7642                                          struct drm_connector_state *conn_state)
7643 {
7644         struct intel_connector *connector = to_intel_connector(conn_state->connector);
7645         struct drm_i915_private *i915 = to_i915(connector->base.dev);
7646
7647         drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
7648                     connector->base.base.id, connector->base.name);
7649
7650         if (connector->get_hw_state(connector)) {
7651                 struct intel_encoder *encoder = intel_attached_encoder(connector);
7652
7653                 I915_STATE_WARN(!crtc_state,
7654                          "connector enabled without attached crtc\n");
7655
7656                 if (!crtc_state)
7657                         return;
7658
7659                 I915_STATE_WARN(!crtc_state->hw.active,
7660                                 "connector is active, but attached crtc isn't\n");
7661
7662                 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
7663                         return;
7664
7665                 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
7666                         "atomic encoder doesn't match attached encoder\n");
7667
7668                 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
7669                         "attached encoder crtc differs from connector crtc\n");
7670         } else {
7671                 I915_STATE_WARN(crtc_state && crtc_state->hw.active,
7672                                 "attached crtc is active, but connector isn't\n");
7673                 I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
7674                         "best encoder set without crtc!\n");
7675         }
7676 }
7677
7678 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7679 {
7680         if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
7681                 return crtc_state->fdi_lanes;
7682
7683         return 0;
7684 }
7685
/*
 * Validate the FDI lane count requested in @pipe_config for @pipe against
 * the platform's FDI topology. Returns 0 if the config fits, -EINVAL if it
 * cannot work, or an error (e.g. -EDEADLK) from acquiring another crtc's
 * state when lanes are shared between pipes.
 */
static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	/* 4 lanes is the absolute maximum any FDI link supports. */
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW only retain a 2-lane FDI link (for the LPT PCH). */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* With only two pipes there is no lane sharing to worry about. */
	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		/* Pipe A has its own dedicated lanes. */
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/*
		 * Pipe B wants >2 lanes, which it can only borrow from
		 * pipe C — so pipe C must not be using FDI at all.
		 */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		/* Pipe C never has more than 2 lanes of its own. */
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* Pipe C's lanes are unavailable if pipe B borrowed them. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
7763
/* Returned to tell the caller that the config changed and must be recomputed. */
#define RETRY 1
/*
 * Compute the FDI link parameters (lane count and M/N dividers) for a
 * PCH-attached pipe. If the requested bpp doesn't fit the available FDI
 * bandwidth, the pipe bpp is reduced and RETRY is returned so the caller
 * re-runs the full config computation with the lower bpp.
 */
static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false, false);

	ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	/* Deadlock from acquiring another crtc's state: propagate for backoff. */
	if (ret == -EDEADLK)
		return ret;

	/*
	 * Lane config didn't fit: drop 2 bits per component and try again,
	 * down to a floor of 6 bpc (6*3 = 18 bpp).
	 */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		drm_dbg_kms(&i915->drm,
			    "fdi link bw constraint, reducing pipe bpp to %i\n",
			    pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	/* bpp was lowered along the way: caller must recompute the config. */
	if (needs_recompute)
		return RETRY;

	return ret;
}
7814
7815 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
7816 {
7817         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7818         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7819
7820         /* IPS only exists on ULT machines and is tied to pipe A. */
7821         if (!hsw_crtc_supports_ips(crtc))
7822                 return false;
7823
7824         if (!i915_modparams.enable_ips)
7825                 return false;
7826
7827         if (crtc_state->pipe_bpp > 24)
7828                 return false;
7829
7830         /*
7831          * We compare against max which means we must take
7832          * the increased cdclk requirement into account when
7833          * calculating the new cdclk.
7834          *
7835          * Should measure whether using a lower cdclk w/o IPS
7836          */
7837         if (IS_BROADWELL(dev_priv) &&
7838             crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
7839                 return false;
7840
7841         return true;
7842 }
7843
/*
 * Decide whether IPS (Intermediate Pixel Storage) should be enabled for
 * this crtc state. Sets crtc_state->ips_enabled; returns 0 on success or
 * an error from acquiring the cdclk state (e.g. -EDEADLK).
 */
static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);

	/* Default to off; only flip on once every constraint passes. */
	crtc_state->ips_enabled = false;

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return 0;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return 0;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return 0;

	if (IS_BROADWELL(dev_priv)) {
		const struct intel_cdclk_state *cdclk_state;

		/* May lock the global cdclk state and thus return -EDEADLK. */
		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
			return 0;
	}

	crtc_state->ips_enabled = true;

	return 0;
}
7885
7886 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7887 {
7888         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7889
7890         /* GDG double wide on either pipe, otherwise pipe A only */
7891         return INTEL_GEN(dev_priv) < 4 &&
7892                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
7893 }
7894
7895 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
7896 {
7897         u32 pixel_rate;
7898
7899         pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock;
7900
7901         /*
7902          * We only use IF-ID interlacing. If we ever use
7903          * PF-ID we'll need to adjust the pixel_rate here.
7904          */
7905
7906         if (pipe_config->pch_pfit.enabled) {
7907                 u64 pipe_w, pipe_h, pfit_w, pfit_h;
7908                 u32 pfit_size = pipe_config->pch_pfit.size;
7909
7910                 pipe_w = pipe_config->pipe_src_w;
7911                 pipe_h = pipe_config->pipe_src_h;
7912
7913                 pfit_w = (pfit_size >> 16) & 0xFFFF;
7914                 pfit_h = pfit_size & 0xFFFF;
7915                 if (pipe_w < pfit_w)
7916                         pipe_w = pfit_w;
7917                 if (pipe_h < pfit_h)
7918                         pipe_h = pfit_h;
7919
7920                 if (WARN_ON(!pfit_w || !pfit_h))
7921                         return pixel_rate;
7922
7923                 pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
7924                                      pfit_w * pfit_h);
7925         }
7926
7927         return pixel_rate;
7928 }
7929
7930 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
7931 {
7932         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
7933
7934         if (HAS_GMCH(dev_priv))
7935                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
7936                 crtc_state->pixel_rate =
7937                         crtc_state->hw.adjusted_mode.crtc_clock;
7938         else
7939                 crtc_state->pixel_rate =
7940                         ilk_pipe_pixel_rate(crtc_state);
7941 }
7942
/*
 * Validate and finalize the crtc-level parts of @pipe_config: dotclock
 * limits (incl. gen2/3 double wide), output format constraints, source
 * width restrictions, pixel rate, and FDI config for PCH encoders.
 * Returns 0, -EINVAL, or RETRY/-EDEADLK from the FDI computation.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		/* In single wide mode the dotclock is capped at 90% of cdclk. */
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    adjusted_mode->crtc_clock, clock_limit,
			    yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
	     pipe_config->hw.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	/* PCH encoders need the FDI link configured (may return RETRY). */
	if (pipe_config->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, pipe_config);

	return 0;
}
8020
8021 static void
8022 intel_reduce_m_n_ratio(u32 *num, u32 *den)
8023 {
8024         while (*num > DATA_LINK_M_N_MASK ||
8025                *den > DATA_LINK_M_N_MASK) {
8026                 *num >>= 1;
8027                 *den >>= 1;
8028         }
8029 }
8030
8031 static void compute_m_n(unsigned int m, unsigned int n,
8032                         u32 *ret_m, u32 *ret_n,
8033                         bool constant_n)
8034 {
8035         /*
8036          * Several DP dongles in particular seem to be fussy about
8037          * too large link M/N values. Give N value as 0x8000 that
8038          * should be acceptable by specific devices. 0x8000 is the
8039          * specified fixed N value for asynchronous clock mode,
8040          * which the devices expect also in synchronous clock mode.
8041          */
8042         if (constant_n)
8043                 *ret_n = 0x8000;
8044         else
8045                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
8046
8047         *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
8048         intel_reduce_m_n_ratio(ret_m, ret_n);
8049 }
8050
8051 void
8052 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
8053                        int pixel_clock, int link_clock,
8054                        struct intel_link_m_n *m_n,
8055                        bool constant_n, bool fec_enable)
8056 {
8057         u32 data_clock = bits_per_pixel * pixel_clock;
8058
8059         if (fec_enable)
8060                 data_clock = intel_dp_mode_to_fec_clock(data_clock);
8061
8062         m_n->tu = 64;
8063         compute_m_n(data_clock,
8064                     link_clock * nlanes * 8,
8065                     &m_n->gmch_m, &m_n->gmch_n,
8066                     constant_n);
8067
8068         compute_m_n(pixel_clock, link_clock,
8069                     &m_n->link_m, &m_n->link_n,
8070                     constant_n);
8071 }
8072
8073 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
8074 {
8075         /*
8076          * There may be no VBT; and if the BIOS enabled SSC we can
8077          * just keep using it to avoid unnecessary flicker.  Whereas if the
8078          * BIOS isn't using it, don't assume it will work even if the VBT
8079          * indicates as much.
8080          */
8081         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
8082                 bool bios_lvds_use_ssc = intel_de_read(dev_priv,
8083                                                        PCH_DREF_CONTROL) &
8084                         DREF_SSC1_ENABLE;
8085
8086                 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
8087                         drm_dbg_kms(&dev_priv->drm,
8088                                     "SSC %s by BIOS, overriding VBT which says %s\n",
8089                                     enableddisabled(bios_lvds_use_ssc),
8090                                     enableddisabled(dev_priv->vbt.lvds_use_ssc));
8091                         dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
8092                 }
8093         }
8094 }
8095
8096 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
8097 {
8098         if (i915_modparams.panel_use_ssc >= 0)
8099                 return i915_modparams.panel_use_ssc != 0;
8100         return dev_priv->vbt.lvds_use_ssc
8101                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
8102 }
8103
8104 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
8105 {
8106         return (1 << dpll->n) << 16 | dpll->m2;
8107 }
8108
8109 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
8110 {
8111         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
8112 }
8113
8114 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
8115                                      struct intel_crtc_state *crtc_state,
8116                                      struct dpll *reduced_clock)
8117 {
8118         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8119         u32 fp, fp2 = 0;
8120
8121         if (IS_PINEVIEW(dev_priv)) {
8122                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
8123                 if (reduced_clock)
8124                         fp2 = pnv_dpll_compute_fp(reduced_clock);
8125         } else {
8126                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
8127                 if (reduced_clock)
8128                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
8129         }
8130
8131         crtc_state->dpll_hw_state.fp0 = fp;
8132
8133         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8134             reduced_clock) {
8135                 crtc_state->dpll_hw_state.fp1 = fp2;
8136         } else {
8137                 crtc_state->dpll_hw_state.fp1 = fp;
8138         }
8139 }
8140
/*
 * Work around PLL B opamp miscalibration by forcing a sane value via DPIO.
 * The exact register field meanings are undocumented here; the magic
 * constants presumably come from VBIOS/hardware notes — do not change
 * without referring to those (see the "eDP HDMI DPIO driver vbios notes"
 * mentioned by the callers).
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	/* NOTE(review): top-byte value 0x8c vs 0xb0 below — sequence looks
	 * like enable-calibrate-disable; confirm against DPIO docs. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/* Clear the forced opamp value again (low byte back to 0). */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
8169
/*
 * Program the PCH transcoder data/link M/N registers for this crtc's pipe
 * from the precomputed @m_n values.
 */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* TU size shares the DATA_M register with the data M value. */
	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
		       TU_SIZE(m_n->tu) | m_n->gmch_m);
	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
8183
8184 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
8185                                  enum transcoder transcoder)
8186 {
8187         if (IS_HASWELL(dev_priv))
8188                 return transcoder == TRANSCODER_EDP;
8189
8190         /*
8191          * Strictly speaking some registers are available before
8192          * gen7, but we only support DRRS on gen7+
8193          */
8194         return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
8195 }
8196
/*
 * Program the CPU transcoder data/link M/N registers. On gen5+ the
 * registers are indexed by transcoder; on gen4 and earlier by pipe.
 * @m2_n2 (optional) holds the DRRS downclock values, written only where
 * the hardware has a second register set.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
			       m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
			       m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
			       m_n->link_n);
		/*
		 *  M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
				       m2_n2->gmch_n);
			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
				       m2_n2->link_m);
			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
				       m2_n2->link_n);
		}
	} else {
		/* gen4 and earlier: per-pipe G4X register layout. */
		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
8238
/*
 * Program the DP link M/N values. @m_n selects which set to write:
 * M1_N1 writes the primary values (plus M2_N2 where supported), while
 * M2_N2 writes the downclock (DRRS) values into the M1_N1 registers on
 * hardware lacking a second register set.
 */
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

	if (m_n == M1_N1) {
		dp_m_n = &crtc_state->dp_m_n;
		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc_state->dp_m2_n2;
	} else {
		drm_err(&i915->drm, "Unsupported divider value\n");
		return;
	}

	/*
	 * NOTE(review): the PCH path always writes dp_m_n from the state,
	 * ignoring the M2_N2 selection above — presumably fine because DRRS
	 * (the only M2_N2 caller) is eDP-only and eDP never sits on a PCH
	 * encoder here; confirm against the DRRS call sites.
	 */
	if (crtc_state->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
8264
8265 static void vlv_compute_dpll(struct intel_crtc *crtc,
8266                              struct intel_crtc_state *pipe_config)
8267 {
8268         pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
8269                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
8270         if (crtc->pipe != PIPE_A)
8271                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
8272
8273         /* DPLL not used with DSI, but still need the rest set up */
8274         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
8275                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
8276                         DPLL_EXT_BUFFER_ENABLE_VLV;
8277
8278         pipe_config->dpll_hw_state.dpll_md =
8279                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
8280 }
8281
8282 static void chv_compute_dpll(struct intel_crtc *crtc,
8283                              struct intel_crtc_state *pipe_config)
8284 {
8285         pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
8286                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
8287         if (crtc->pipe != PIPE_A)
8288                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
8289
8290         /* DPLL not used with DSI, but still need the rest set up */
8291         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
8292                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
8293
8294         pipe_config->dpll_hw_state.dpll_md =
8295                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
8296 }
8297
8298 static void vlv_prepare_pll(struct intel_crtc *crtc,
8299                             const struct intel_crtc_state *pipe_config)
8300 {
8301         struct drm_device *dev = crtc->base.dev;
8302         struct drm_i915_private *dev_priv = to_i915(dev);
8303         enum pipe pipe = crtc->pipe;
8304         u32 mdiv;
8305         u32 bestn, bestm1, bestm2, bestp1, bestp2;
8306         u32 coreclk, reg_val;
8307
8308         /* Enable Refclk */
8309         intel_de_write(dev_priv, DPLL(pipe),
8310                        pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
8311
8312         /* No need to actually set up the DPLL with DSI */
8313         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8314                 return;
8315
8316         vlv_dpio_get(dev_priv);
8317
8318         bestn = pipe_config->dpll.n;
8319         bestm1 = pipe_config->dpll.m1;
8320         bestm2 = pipe_config->dpll.m2;
8321         bestp1 = pipe_config->dpll.p1;
8322         bestp2 = pipe_config->dpll.p2;
8323
8324         /* See eDP HDMI DPIO driver vbios notes doc */
8325
8326         /* PLL B needs special handling */
8327         if (pipe == PIPE_B)
8328                 vlv_pllb_recal_opamp(dev_priv, pipe);
8329
8330         /* Set up Tx target for periodic Rcomp update */
8331         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
8332
8333         /* Disable target IRef on PLL */
8334         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
8335         reg_val &= 0x00ffffff;
8336         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
8337
8338         /* Disable fast lock */
8339         vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
8340
8341         /* Set idtafcrecal before PLL is enabled */
8342         mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
8343         mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
8344         mdiv |= ((bestn << DPIO_N_SHIFT));
8345         mdiv |= (1 << DPIO_K_SHIFT);
8346
8347         /*
8348          * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
8349          * but we don't support that).
8350          * Note: don't use the DAC post divider as it seems unstable.
8351          */
8352         mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
8353         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
8354
8355         mdiv |= DPIO_ENABLE_CALIBRATION;
8356         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
8357
8358         /* Set HBR and RBR LPF coefficients */
8359         if (pipe_config->port_clock == 162000 ||
8360             intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
8361             intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
8362                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
8363                                  0x009f0003);
8364         else
8365                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
8366                                  0x00d0000f);
8367
8368         if (intel_crtc_has_dp_encoder(pipe_config)) {
8369                 /* Use SSC source */
8370                 if (pipe == PIPE_A)
8371                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
8372                                          0x0df40000);
8373                 else
8374                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
8375                                          0x0df70000);
8376         } else { /* HDMI or VGA */
8377                 /* Use bend source */
8378                 if (pipe == PIPE_A)
8379                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
8380                                          0x0df70000);
8381                 else
8382                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
8383                                          0x0df40000);
8384         }
8385
8386         coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
8387         coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
8388         if (intel_crtc_has_dp_encoder(pipe_config))
8389                 coreclk |= 0x01000000;
8390         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
8391
8392         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
8393
8394         vlv_dpio_put(dev_priv);
8395 }
8396
/*
 * Program the CHV DPIO PHY PLL for @crtc from the precomputed divider
 * state in @pipe_config->dpll: p1/p2 post dividers, n/m1/m2 feedback
 * dividers (including the m2 fractional part), lock detect threshold
 * and VCO-dependent loop filter coefficients. The PLL itself is not
 * enabled here; only prepared. The display PLL register is written with
 * VCO disabled so only refclk/SSC are active during programming.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	intel_de_write(dev_priv, DPLL(pipe),
		       pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* m2 carries 22 fractional bits; split into integer/fraction */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	/* All PHY register access below needs the DPIO lock/powerwell */
	vlv_dpio_get(dev_priv);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable (only when a fraction is in use) */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter coefficients are selected by VCO frequency band */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	/* TDC target count, chosen together with the loop filter above */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	vlv_dpio_put(dev_priv);
}
8501
8502 /**
8503  * vlv_force_pll_on - forcibly enable just the PLL
8504  * @dev_priv: i915 private structure
8505  * @pipe: pipe PLL to enable
8506  * @dpll: PLL configuration
8507  *
8508  * Enable the PLL for @pipe using the supplied @dpll config. To be used
8509  * in cases where we need the PLL enabled even when @pipe is not going to
8510  * be enabled.
8511  */
8512 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
8513                      const struct dpll *dpll)
8514 {
8515         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
8516         struct intel_crtc_state *pipe_config;
8517
8518         pipe_config = intel_crtc_state_alloc(crtc);
8519         if (!pipe_config)
8520                 return -ENOMEM;
8521
8522         pipe_config->cpu_transcoder = (enum transcoder)pipe;
8523         pipe_config->pixel_multiplier = 1;
8524         pipe_config->dpll = *dpll;
8525
8526         if (IS_CHERRYVIEW(dev_priv)) {
8527                 chv_compute_dpll(crtc, pipe_config);
8528                 chv_prepare_pll(crtc, pipe_config);
8529                 chv_enable_pll(crtc, pipe_config);
8530         } else {
8531                 vlv_compute_dpll(crtc, pipe_config);
8532                 vlv_prepare_pll(crtc, pipe_config);
8533                 vlv_enable_pll(crtc, pipe_config);
8534         }
8535
8536         kfree(pipe_config);
8537
8538         return 0;
8539 }
8540
8541 /**
8542  * vlv_force_pll_off - forcibly disable just the PLL
8543  * @dev_priv: i915 private structure
8544  * @pipe: pipe PLL to disable
8545  *
8546  * Disable the PLL for @pipe. To be used in cases where we need
8547  * the PLL enabled even when @pipe is not going to be enabled.
8548  */
8549 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
8550 {
8551         if (IS_CHERRYVIEW(dev_priv))
8552                 chv_disable_pll(dev_priv, pipe);
8553         else
8554                 vlv_disable_pll(dev_priv, pipe);
8555 }
8556
/*
 * Compute the DPLL (and, on gen4+, DPLL_MD) control register values for
 * i9xx-class hardware and stash them in crtc_state->dpll_hw_state. The
 * actual divider values come from crtc_state->dpll (filled in by the
 * *_find_best_dpll() search); @reduced_clock optionally supplies a
 * second p1 divider for the reduced/downclocked mode on G4X.
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	/* Write FP0/FP1 divider registers state first */
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* These platforms carry the pixel multiplier in the DPLL itself */
	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection: TV clock, SSC for LVDS, or DREFCLK */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	/* gen4+ carries the pixel multiplier in DPLL_MD instead */
	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
8629
/*
 * Compute the DPLL control register value for gen2 (i8xx) hardware and
 * stash it in crtc_state->dpll_hw_state.dpll. Divider values come from
 * crtc_state->dpll; @reduced_clock is passed through to the FP divider
 * programming only.
 */
static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	/* Write FP0/FP1 divider registers state first */
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	/* LVDS and the other outputs encode p1/p2 differently on gen2 */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	/*
	 * Bspec:
	 * "[Almador Errata}: For the correct operation of the muxed DVO pins
	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
	 *  Enable) must be set to “1” in both the DPLL A Control Register
	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
	 *
	 * For simplicity We simply keep both bits always enabled in
	 * both DPLLS. The spec says we should disable the DVO 2X clock
	 * when not needed, but this seems to work fine in practice.
	 */
	if (IS_I830(dev_priv) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	/* SSC reference for LVDS panels, plain DREFCLK otherwise */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}
8679
/*
 * Program the transcoder timing registers (H/VTOTAL, H/VBLANK, H/VSYNC,
 * VSYNCSHIFT) from crtc_state->hw.adjusted_mode. All registers take
 * (value - 1), with the "start" value in the low 16 bits and the "end"
 * value in the high 16 bits.
 */
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT only exists on gen4+ */
	if (INTEL_GEN(dev_priv) > 3)
		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
			       vsyncshift);

	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));

	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, VTOTAL(pipe),
			       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));

}
8737
8738 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
8739 {
8740         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8741         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8742         enum pipe pipe = crtc->pipe;
8743
8744         /* pipesrc controls the size that is scaled from, which should
8745          * always be the user's requested size.
8746          */
8747         intel_de_write(dev_priv, PIPESRC(pipe),
8748                        ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
8749 }
8750
8751 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
8752 {
8753         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
8754         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8755
8756         if (IS_GEN(dev_priv, 2))
8757                 return false;
8758
8759         if (INTEL_GEN(dev_priv) >= 9 ||
8760             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8761                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
8762         else
8763                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
8764 }
8765
8766 static void intel_get_pipe_timings(struct intel_crtc *crtc,
8767                                    struct intel_crtc_state *pipe_config)
8768 {
8769         struct drm_device *dev = crtc->base.dev;
8770         struct drm_i915_private *dev_priv = to_i915(dev);
8771         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
8772         u32 tmp;
8773
8774         tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
8775         pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
8776         pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
8777
8778         if (!transcoder_is_dsi(cpu_transcoder)) {
8779                 tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
8780                 pipe_config->hw.adjusted_mode.crtc_hblank_start =
8781                                                         (tmp & 0xffff) + 1;
8782                 pipe_config->hw.adjusted_mode.crtc_hblank_end =
8783                                                 ((tmp >> 16) & 0xffff) + 1;
8784         }
8785         tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
8786         pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
8787         pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
8788
8789         tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
8790         pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
8791         pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
8792
8793         if (!transcoder_is_dsi(cpu_transcoder)) {
8794                 tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
8795                 pipe_config->hw.adjusted_mode.crtc_vblank_start =
8796                                                         (tmp & 0xffff) + 1;
8797                 pipe_config->hw.adjusted_mode.crtc_vblank_end =
8798                                                 ((tmp >> 16) & 0xffff) + 1;
8799         }
8800         tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
8801         pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
8802         pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
8803
8804         if (intel_pipe_is_interlaced(pipe_config)) {
8805                 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
8806                 pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
8807                 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
8808         }
8809 }
8810
8811 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8812                                     struct intel_crtc_state *pipe_config)
8813 {
8814         struct drm_device *dev = crtc->base.dev;
8815         struct drm_i915_private *dev_priv = to_i915(dev);
8816         u32 tmp;
8817
8818         tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
8819         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8820         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8821
8822         pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h;
8823         pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w;
8824 }
8825
8826 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
8827                                  struct intel_crtc_state *pipe_config)
8828 {
8829         mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay;
8830         mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal;
8831         mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start;
8832         mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end;
8833
8834         mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay;
8835         mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal;
8836         mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start;
8837         mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end;
8838
8839         mode->flags = pipe_config->hw.adjusted_mode.flags;
8840         mode->type = DRM_MODE_TYPE_DRIVER;
8841
8842         mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
8843
8844         mode->hsync = drm_mode_hsync(mode);
8845         mode->vrefresh = drm_mode_vrefresh(mode);
8846         drm_mode_set_name(mode);
8847 }
8848
/*
 * Program PIPECONF for i9xx-class hardware from the crtc state:
 * double-wide mode, dither/bpc (g4x+ only), interlace mode, limited
 * color range (VLV/CHV only) and gamma mode.
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* gen3 and earlier (and SDVO outputs) need the field indication */
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	pipeconf |= PIPECONF_FRAME_START_DELAY(0);

	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
8909
8910 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8911                                    struct intel_crtc_state *crtc_state)
8912 {
8913         struct drm_device *dev = crtc->base.dev;
8914         struct drm_i915_private *dev_priv = to_i915(dev);
8915         const struct intel_limit *limit;
8916         int refclk = 48000;
8917
8918         memset(&crtc_state->dpll_hw_state, 0,
8919                sizeof(crtc_state->dpll_hw_state));
8920
8921         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8922                 if (intel_panel_use_ssc(dev_priv)) {
8923                         refclk = dev_priv->vbt.lvds_ssc_freq;
8924                         drm_dbg_kms(&dev_priv->drm,
8925                                     "using SSC reference clock of %d kHz\n",
8926                                     refclk);
8927                 }
8928
8929                 limit = &intel_limits_i8xx_lvds;
8930         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
8931                 limit = &intel_limits_i8xx_dvo;
8932         } else {
8933                 limit = &intel_limits_i8xx_dac;
8934         }
8935
8936         if (!crtc_state->clock_set &&
8937             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8938                                  refclk, NULL, &crtc_state->dpll)) {
8939                 drm_err(&dev_priv->drm,
8940                         "Couldn't find PLL settings for mode!\n");
8941                 return -EINVAL;
8942         }
8943
8944         i8xx_compute_dpll(crtc, crtc_state, NULL);
8945
8946         return 0;
8947 }
8948
8949 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
8950                                   struct intel_crtc_state *crtc_state)
8951 {
8952         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8953         const struct intel_limit *limit;
8954         int refclk = 96000;
8955
8956         memset(&crtc_state->dpll_hw_state, 0,
8957                sizeof(crtc_state->dpll_hw_state));
8958
8959         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8960                 if (intel_panel_use_ssc(dev_priv)) {
8961                         refclk = dev_priv->vbt.lvds_ssc_freq;
8962                         drm_dbg_kms(&dev_priv->drm,
8963                                     "using SSC reference clock of %d kHz\n",
8964                                     refclk);
8965                 }
8966
8967                 if (intel_is_dual_link_lvds(dev_priv))
8968                         limit = &intel_limits_g4x_dual_channel_lvds;
8969                 else
8970                         limit = &intel_limits_g4x_single_channel_lvds;
8971         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
8972                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
8973                 limit = &intel_limits_g4x_hdmi;
8974         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
8975                 limit = &intel_limits_g4x_sdvo;
8976         } else {
8977                 /* The option is for other outputs */
8978                 limit = &intel_limits_i9xx_sdvo;
8979         }
8980
8981         if (!crtc_state->clock_set &&
8982             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8983                                 refclk, NULL, &crtc_state->dpll)) {
8984                 drm_err(&dev_priv->drm,
8985                         "Couldn't find PLL settings for mode!\n");
8986                 return -EINVAL;
8987         }
8988
8989         i9xx_compute_dpll(crtc, crtc_state, NULL);
8990
8991         return 0;
8992 }
8993
8994 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
8995                                   struct intel_crtc_state *crtc_state)
8996 {
8997         struct drm_device *dev = crtc->base.dev;
8998         struct drm_i915_private *dev_priv = to_i915(dev);
8999         const struct intel_limit *limit;
9000         int refclk = 96000;
9001
9002         memset(&crtc_state->dpll_hw_state, 0,
9003                sizeof(crtc_state->dpll_hw_state));
9004
9005         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9006                 if (intel_panel_use_ssc(dev_priv)) {
9007                         refclk = dev_priv->vbt.lvds_ssc_freq;
9008                         drm_dbg_kms(&dev_priv->drm,
9009                                     "using SSC reference clock of %d kHz\n",
9010                                     refclk);
9011                 }
9012
9013                 limit = &pnv_limits_lvds;
9014         } else {
9015                 limit = &pnv_limits_sdvo;
9016         }
9017
9018         if (!crtc_state->clock_set &&
9019             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9020                                 refclk, NULL, &crtc_state->dpll)) {
9021                 drm_err(&dev_priv->drm,
9022                         "Couldn't find PLL settings for mode!\n");
9023                 return -EINVAL;
9024         }
9025
9026         i9xx_compute_dpll(crtc, crtc_state, NULL);
9027
9028         return 0;
9029 }
9030
9031 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
9032                                    struct intel_crtc_state *crtc_state)
9033 {
9034         struct drm_device *dev = crtc->base.dev;
9035         struct drm_i915_private *dev_priv = to_i915(dev);
9036         const struct intel_limit *limit;
9037         int refclk = 96000;
9038
9039         memset(&crtc_state->dpll_hw_state, 0,
9040                sizeof(crtc_state->dpll_hw_state));
9041
9042         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9043                 if (intel_panel_use_ssc(dev_priv)) {
9044                         refclk = dev_priv->vbt.lvds_ssc_freq;
9045                         drm_dbg_kms(&dev_priv->drm,
9046                                     "using SSC reference clock of %d kHz\n",
9047                                     refclk);
9048                 }
9049
9050                 limit = &intel_limits_i9xx_lvds;
9051         } else {
9052                 limit = &intel_limits_i9xx_sdvo;
9053         }
9054
9055         if (!crtc_state->clock_set &&
9056             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9057                                  refclk, NULL, &crtc_state->dpll)) {
9058                 drm_err(&dev_priv->drm,
9059                         "Couldn't find PLL settings for mode!\n");
9060                 return -EINVAL;
9061         }
9062
9063         i9xx_compute_dpll(crtc, crtc_state, NULL);
9064
9065         return 0;
9066 }
9067
9068 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
9069                                   struct intel_crtc_state *crtc_state)
9070 {
9071         int refclk = 100000;
9072         const struct intel_limit *limit = &intel_limits_chv;
9073         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
9074
9075         memset(&crtc_state->dpll_hw_state, 0,
9076                sizeof(crtc_state->dpll_hw_state));
9077
9078         if (!crtc_state->clock_set &&
9079             !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9080                                 refclk, NULL, &crtc_state->dpll)) {
9081                 drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
9082                 return -EINVAL;
9083         }
9084
9085         chv_compute_dpll(crtc, crtc_state);
9086
9087         return 0;
9088 }
9089
9090 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
9091                                   struct intel_crtc_state *crtc_state)
9092 {
9093         int refclk = 100000;
9094         const struct intel_limit *limit = &intel_limits_vlv;
9095         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
9096
9097         memset(&crtc_state->dpll_hw_state, 0,
9098                sizeof(crtc_state->dpll_hw_state));
9099
9100         if (!crtc_state->clock_set &&
9101             !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9102                                 refclk, NULL, &crtc_state->dpll)) {
9103                 drm_err(&i915->drm,  "Couldn't find PLL settings for mode!\n");
9104                 return -EINVAL;
9105         }
9106
9107         vlv_compute_dpll(crtc, crtc_state);
9108
9109         return 0;
9110 }
9111
9112 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
9113 {
9114         if (IS_I830(dev_priv))
9115                 return false;
9116
9117         return INTEL_GEN(dev_priv) >= 4 ||
9118                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
9119 }
9120
9121 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
9122                                  struct intel_crtc_state *pipe_config)
9123 {
9124         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9125         u32 tmp;
9126
9127         if (!i9xx_has_pfit(dev_priv))
9128                 return;
9129
9130         tmp = intel_de_read(dev_priv, PFIT_CONTROL);
9131         if (!(tmp & PFIT_ENABLE))
9132                 return;
9133
9134         /* Check whether the pfit is attached to our pipe. */
9135         if (INTEL_GEN(dev_priv) < 4) {
9136                 if (crtc->pipe != PIPE_B)
9137                         return;
9138         } else {
9139                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
9140                         return;
9141         }
9142
9143         pipe_config->gmch_pfit.control = tmp;
9144         pipe_config->gmch_pfit.pgm_ratios = intel_de_read(dev_priv,
9145                                                           PFIT_PGM_RATIOS);
9146 }
9147
/*
 * Read the currently programmed VLV DPLL dividers back over the DPIO
 * sideband and compute pipe_config->port_clock from them.  Assumes
 * the 100 MHz reference clock for the back-calculation.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        struct dpll clock;
        u32 mdiv;
        int refclk = 100000;

        /* In case of DSI, DPLL will not be used */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        vlv_dpio_get(dev_priv);
        mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
        vlv_dpio_put(dev_priv);

        /* Unpack the m1/m2/n/p1/p2 divider fields from PLL_DW3 */
        clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
        clock.m2 = mdiv & DPIO_M2DIV_MASK;
        clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
        clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
        clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

        pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
9174
/*
 * Read the primary plane's current scanout configuration (format,
 * tiling, rotation, surface base, pitch, size) from the hardware into
 * @plane_config, so a BIOS-programmed framebuffer can be inherited.
 * Returns silently (leaving plane_config->fb unset) if the plane is
 * disabled or the fb struct cannot be allocated.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *plane = to_intel_plane(crtc->base.primary);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        enum pipe pipe;
        u32 val, base, offset;
        int fourcc, pixel_format;
        unsigned int aligned_height;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;

        /* Nothing to take over if the plane isn't enabled */
        if (!plane->get_hw_state(plane, &pipe))
                return;

        WARN_ON(pipe != crtc->pipe);

        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
        if (!intel_fb) {
                drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
                return;
        }

        fb = &intel_fb->base;

        fb->dev = dev;

        val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

        /* gen4+ can report X-tiling and 180 degree rotation */
        if (INTEL_GEN(dev_priv) >= 4) {
                if (val & DISPPLANE_TILED) {
                        plane_config->tiling = I915_TILING_X;
                        fb->modifier = I915_FORMAT_MOD_X_TILED;
                }

                if (val & DISPPLANE_ROTATE_180)
                        plane_config->rotation = DRM_MODE_ROTATE_180;
        }

        /* CHV pipe B additionally supports horizontal mirroring */
        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
            val & DISPPLANE_MIRROR)
                plane_config->rotation |= DRM_MODE_REFLECT_X;

        pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
        fourcc = i9xx_format_to_fourcc(pixel_format);
        fb->format = drm_format_info(fourcc);

        /* The surface base/offset registers moved around over the gens */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane));
                base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                if (plane_config->tiling)
                        offset = intel_de_read(dev_priv,
                                               DSPTILEOFF(i9xx_plane));
                else
                        offset = intel_de_read(dev_priv,
                                               DSPLINOFF(i9xx_plane));
                base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
        } else {
                base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
        }
        plane_config->base = base;

        /* PIPESRC holds (width - 1) << 16 | (height - 1) */
        val = intel_de_read(dev_priv, PIPESRC(pipe));
        fb->width = ((val >> 16) & 0xfff) + 1;
        fb->height = ((val >> 0) & 0xfff) + 1;

        val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane));
        fb->pitches[0] = val & 0xffffffc0;

        aligned_height = intel_fb_align_height(fb, 0, fb->height);

        plane_config->size = fb->pitches[0] * aligned_height;

        drm_dbg_kms(&dev_priv->drm,
                    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                    crtc->base.name, plane->base.name, fb->width, fb->height,
                    fb->format->cpp[0] * 8, base, fb->pitches[0],
                    plane_config->size);

        plane_config->fb = intel_fb;
}
9260
/*
 * Read the currently programmed CHV DPLL dividers back over the DPIO
 * sideband and compute pipe_config->port_clock from them.  Assumes
 * the 100 MHz reference clock for the back-calculation.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        struct dpll clock;
        u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
        int refclk = 100000;

        /* In case of DSI, DPLL will not be used */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        vlv_dpio_get(dev_priv);
        cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
        pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
        pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
        pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
        pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
        vlv_dpio_put(dev_priv);

        /*
         * m2 is split: the integer part lives in DW0 (shifted up by
         * 22), and the fractional low 22 bits come from DW2 when the
         * fractional divider is enabled in DW3.
         */
        clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
        clock.m2 = (pll_dw0 & 0xff) << 22;
        if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
                clock.m2 |= pll_dw2 & 0x3fffff;
        clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
        clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
        clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

        pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
9294
9295 static enum intel_output_format
9296 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
9297 {
9298         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9299         u32 tmp;
9300
9301         tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
9302
9303         if (tmp & PIPEMISC_YUV420_ENABLE) {
9304                 /* We support 4:2:0 in full blend mode only */
9305                 WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
9306
9307                 return INTEL_OUTPUT_FORMAT_YCBCR420;
9308         } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
9309                 return INTEL_OUTPUT_FORMAT_YCBCR444;
9310         } else {
9311                 return INTEL_OUTPUT_FORMAT_RGB;
9312         }
9313 }
9314
9315 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
9316 {
9317         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9318         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9319         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9320         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
9321         u32 tmp;
9322
9323         tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
9324
9325         if (tmp & DISPPLANE_GAMMA_ENABLE)
9326                 crtc_state->gamma_enable = true;
9327
9328         if (!HAS_GMCH(dev_priv) &&
9329             tmp & DISPPLANE_PIPE_CSC_ENABLE)
9330                 crtc_state->csc_enable = true;
9331 }
9332
/*
 * Read the full hardware state of a GMCH-style pipe back into
 * @pipe_config.  Returns false when the pipe's power domain is off or
 * the pipe itself is disabled; true when the readout succeeded.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;
        u32 tmp;
        bool ret;

        /* Don't touch the registers if the pipe's domain is powered down */
        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wakeref)
                return false;

        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = NULL;
        pipe_config->master_transcoder = INVALID_TRANSCODER;

        ret = false;

        tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
        if (!(tmp & PIPECONF_ENABLE))
                goto out;

        /* Only these platforms report pipe bpp in PIPECONF */
        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
            IS_CHERRYVIEW(dev_priv)) {
                switch (tmp & PIPECONF_BPC_MASK) {
                case PIPECONF_6BPC:
                        pipe_config->pipe_bpp = 18;
                        break;
                case PIPECONF_8BPC:
                        pipe_config->pipe_bpp = 24;
                        break;
                case PIPECONF_10BPC:
                        pipe_config->pipe_bpp = 30;
                        break;
                default:
                        break;
                }
        }

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            (tmp & PIPECONF_COLOR_RANGE_SELECT))
                pipe_config->limited_color_range = true;

        pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
                PIPECONF_GAMMA_MODE_SHIFT;

        if (IS_CHERRYVIEW(dev_priv))
                pipe_config->cgm_mode = intel_de_read(dev_priv,
                                                      CGM_PIPE_MODE(crtc->pipe));

        i9xx_get_pipe_color_config(pipe_config);
        intel_color_get_config(pipe_config);

        if (INTEL_GEN(dev_priv) < 4)
                pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

        intel_get_pipe_timings(crtc, pipe_config);
        intel_get_pipe_src_size(crtc, pipe_config);

        i9xx_get_pfit_config(crtc, pipe_config);

        /* Pixel multiplier readout varies by generation */
        if (INTEL_GEN(dev_priv) >= 4) {
                /* No way to read it out on pipes B and C */
                if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
                        tmp = dev_priv->chv_dpll_md[crtc->pipe];
                else
                        tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
                         >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
                pipe_config->dpll_hw_state.dpll_md = tmp;
        } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
                   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
                tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & SDVO_MULTIPLIER_MASK)
                         >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
        } else {
                /* Note that on i915G/GM the pixel multiplier is in the sdvo
                 * port and will be fixed up in the encoder->get_config
                 * function. */
                pipe_config->pixel_multiplier = 1;
        }
        pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
                                                        DPLL(crtc->pipe));
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
                pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
                                                               FP0(crtc->pipe));
                pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
                                                               FP1(crtc->pipe));
        } else {
                /* Mask out read-only status bits. */
                pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
                                                     DPLL_PORTC_READY_MASK |
                                                     DPLL_PORTB_READY_MASK);
        }

        if (IS_CHERRYVIEW(dev_priv))
                chv_crtc_clock_get(crtc, pipe_config);
        else if (IS_VALLEYVIEW(dev_priv))
                vlv_crtc_clock_get(crtc, pipe_config);
        else
                i9xx_crtc_clock_get(crtc, pipe_config);

        /*
         * Normally the dotclock is filled in by the encoder .get_config()
         * but in case the pipe is enabled w/o any ports we need a sane
         * default.
         */
        pipe_config->hw.adjusted_mode.crtc_clock =
                pipe_config->port_clock / pipe_config->pixel_multiplier;

        ret = true;

out:
        intel_display_power_put(dev_priv, power_domain, wakeref);

        return ret;
}
9455
/*
 * Set up the PCH display reference clock (PCH_DREF_CONTROL) on
 * Ironlake-class PCHs.  Computes the desired final register value
 * from the connected outputs (LVDS/eDP panels, CK505 clock source,
 * SSC usage), then walks the hardware to that state one source at a
 * time with the required settle delays.  No-op if the register is
 * already in the desired state.
 */
static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        int i;
        u32 val, final;
        bool has_lvds = false;
        bool has_cpu_edp = false;
        bool has_panel = false;
        bool has_ck505 = false;
        bool can_ssc = false;
        bool using_ssc_source = false;

        /* We need to take the global config into account */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                switch (encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        has_panel = true;
                        has_lvds = true;
                        break;
                case INTEL_OUTPUT_EDP:
                        has_panel = true;
                        if (encoder->port == PORT_A)
                                has_cpu_edp = true;
                        break;
                default:
                        break;
                }
        }

        /* On IBX the external CK505 (per VBT) gates whether SSC is usable */
        if (HAS_PCH_IBX(dev_priv)) {
                has_ck505 = dev_priv->vbt.display_clock_mode;
                can_ssc = has_ck505;
        } else {
                has_ck505 = false;
                can_ssc = true;
        }

        /* Check if any DPLLs are using the SSC source */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));

                if (!(temp & DPLL_VCO_ENABLE))
                        continue;

                if ((temp & PLL_REF_INPUT_MASK) ==
                    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
                        using_ssc_source = true;
                        break;
                }
        }

        drm_dbg_kms(&dev_priv->drm,
                    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
                    has_panel, has_lvds, has_ck505, using_ssc_source);

        /* Ironlake: try to setup display ref clock before DPLL
         * enabling. This is only under driver's control after
         * PCH B stepping, previous chipset stepping should be
         * ignoring this setting.
         */
        val = intel_de_read(dev_priv, PCH_DREF_CONTROL);

        /* As we must carefully and slowly disable/enable each source in turn,
         * compute the final state we want first and check if we need to
         * make any changes at all.
         */
        final = val;
        final &= ~DREF_NONSPREAD_SOURCE_MASK;
        if (has_ck505)
                final |= DREF_NONSPREAD_CK505_ENABLE;
        else
                final |= DREF_NONSPREAD_SOURCE_ENABLE;

        final &= ~DREF_SSC_SOURCE_MASK;
        final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
        final &= ~DREF_SSC1_ENABLE;

        if (has_panel) {
                final |= DREF_SSC_SOURCE_ENABLE;

                if (intel_panel_use_ssc(dev_priv) && can_ssc)
                        final |= DREF_SSC1_ENABLE;

                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc)
                                final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        else
                                final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
        } else if (using_ssc_source) {
                /* Keep SSC alive for any DPLL still referencing it */
                final |= DREF_SSC_SOURCE_ENABLE;
                final |= DREF_SSC1_ENABLE;
        }

        if (final == val)
                return;

        /* Always enable nonspread source */
        val &= ~DREF_NONSPREAD_SOURCE_MASK;

        if (has_ck505)
                val |= DREF_NONSPREAD_CK505_ENABLE;
        else
                val |= DREF_NONSPREAD_SOURCE_ENABLE;

        if (has_panel) {
                val &= ~DREF_SSC_SOURCE_MASK;
                val |= DREF_SSC_SOURCE_ENABLE;

                /* SSC must be turned on before enabling the CPU output */
                if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                        drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
                        val |= DREF_SSC1_ENABLE;
                } else
                        val &= ~DREF_SSC1_ENABLE;

                /* Get SSC going before enabling the outputs */
                intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
                intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
                udelay(200);

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Enable CPU source on CPU attached eDP */
                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                                drm_dbg_kms(&dev_priv->drm,
                                            "Using SSC on eDP\n");
                                val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        } else
                                val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
                intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
                udelay(200);
        } else {
                drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Turn off CPU output */
                val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
                intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
                udelay(200);

                if (!using_ssc_source) {
                        drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");

                        /* Turn off the SSC source */
                        val &= ~DREF_SSC_SOURCE_MASK;
                        val |= DREF_SSC_SOURCE_DISABLE;

                        /* Turn off SSC1 */
                        val &= ~DREF_SSC1_ENABLE;

                        intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
                        intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
                        udelay(200);
                }
        }

        /* The stepwise walk above must land exactly on the computed state */
        BUG_ON(val != final);
}
9624
/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2: assert the reset
 * control bit, wait (up to 100 us) for the status bit to latch, then
 * de-assert and wait for it to clear.  Timeouts are logged but not
 * propagated.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
        u32 tmp;

        tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
        tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
        intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

        if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
                        FDI_MPHY_IOSFSB_RESET_STATUS, 100))
                drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");

        tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
        tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
        intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

        if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
                         FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
                drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}
9645
9646 /* WaMPhyProgramming:hsw */
9647 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
9648 {
9649         u32 tmp;
9650
9651         tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
9652         tmp &= ~(0xFF << 24);
9653         tmp |= (0x12 << 24);
9654         intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
9655
9656         tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
9657         tmp |= (1 << 11);
9658         intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
9659
9660         tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
9661         tmp |= (1 << 11);
9662         intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
9663
9664         tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
9665         tmp |= (1 << 24) | (1 << 21) | (1 << 18);
9666         intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
9667
9668         tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
9669         tmp |= (1 << 24) | (1 << 21) | (1 << 18);
9670         intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
9671
9672         tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
9673         tmp &= ~(7 << 13);
9674         tmp |= (5 << 13);
9675         intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
9676
9677         tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
9678         tmp &= ~(7 << 13);
9679         tmp |= (5 << 13);
9680         intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
9681
9682         tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
9683         tmp &= ~0xFF;
9684         tmp |= 0x1C;
9685         intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
9686
9687         tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
9688         tmp &= ~0xFF;
9689         tmp |= 0x1C;
9690         intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
9691
9692         tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
9693         tmp &= ~(0xFF << 16);
9694         tmp |= (0x1C << 16);
9695         intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
9696
9697         tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
9698         tmp &= ~(0xFF << 16);
9699         tmp |= (0x1C << 16);
9700         intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
9701
9702         tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
9703         tmp |= (1 << 27);
9704         intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
9705
9706         tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
9707         tmp |= (1 << 27);
9708         intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
9709
9710         tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
9711         tmp &= ~(0xF << 28);
9712         tmp |= (4 << 28);
9713         intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
9714
9715         tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
9716         tmp &= ~(0xF << 28);
9717         tmp |= (4 << 28);
9718         intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
9719 }
9720
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 *
 * @with_spread: take the clock out of bypass (downspread path); FDI
 *               requires this and it is forced on if @with_fdi is set.
 * @with_fdi: additionally reset and program the FDI mPHY; forced off
 *            on LP PCHs, which have no FDI.
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
                                 bool with_spread, bool with_fdi)
{
        u32 reg, tmp;

        if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
                with_spread = true;
        if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
            with_fdi, "LP PCH doesn't have FDI\n"))
                with_fdi = false;

        mutex_lock(&dev_priv->sb_lock);

        /* Un-gate the SSC while keeping the clock path in bypass */
        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
        tmp &= ~SBI_SSCCTL_DISABLE;
        tmp |= SBI_SSCCTL_PATHALT;
        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

        udelay(24);

        if (with_spread) {
                /* Leave bypass, then (for FDI) bring up the mPHY */
                tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
                tmp &= ~SBI_SSCCTL_PATHALT;
                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

                if (with_fdi) {
                        lpt_reset_fdi_mphy(dev_priv);
                        lpt_program_fdi_mphy(dev_priv);
                }
        }

        /* The buffer-enable override register differs on LP PCHs */
        reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

        mutex_unlock(&dev_priv->sb_lock);
}
9765
/* Sequence to disable CLKOUT_DP:
 * clear the buffer-enable override, then (if the SSC is running) put
 * the clock path into bypass, wait, and finally gate the SSC off.
 */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
        u32 reg, tmp;

        mutex_lock(&dev_priv->sb_lock);

        /* The buffer-enable override register differs on LP PCHs */
        reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
        if (!(tmp & SBI_SSCCTL_DISABLE)) {
                /* Enter bypass first, then disable */
                if (!(tmp & SBI_SSCCTL_PATHALT)) {
                        tmp |= SBI_SSCCTL_PATHALT;
                        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
                        udelay(32);
                }
                tmp |= SBI_SSCCTL_DISABLE;
                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
        }

        mutex_unlock(&dev_priv->sb_lock);
}
9791
/* Map a bend step count (-50..50, multiples of 5) to a table index */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SBI_SSCDIVINTPHASE values for each supported CLKOUT_DP bend step,
 * indexed via BEND_IDX(steps); consumed by lpt_bend_clkout_dp().
 */
static const u16 sscdivintphase[] = {
        [BEND_IDX( 50)] = 0x3B23,
        [BEND_IDX( 45)] = 0x3B23,
        [BEND_IDX( 40)] = 0x3C23,
        [BEND_IDX( 35)] = 0x3C23,
        [BEND_IDX( 30)] = 0x3D23,
        [BEND_IDX( 25)] = 0x3D23,
        [BEND_IDX( 20)] = 0x3E23,
        [BEND_IDX( 15)] = 0x3E23,
        [BEND_IDX( 10)] = 0x3F23,
        [BEND_IDX(  5)] = 0x3F23,
        [BEND_IDX(  0)] = 0x0025,
        [BEND_IDX( -5)] = 0x0025,
        [BEND_IDX(-10)] = 0x0125,
        [BEND_IDX(-15)] = 0x0125,
        [BEND_IDX(-20)] = 0x0225,
        [BEND_IDX(-25)] = 0x0225,
        [BEND_IDX(-30)] = 0x0325,
        [BEND_IDX(-35)] = 0x0325,
        [BEND_IDX(-40)] = 0x0425,
        [BEND_IDX(-45)] = 0x0425,
        [BEND_IDX(-50)] = 0x0525,
};
9817
9818 /*
9819  * Bend CLKOUT_DP
9820  * steps -50 to 50 inclusive, in steps of 5
9821  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
9822  * change in clock period = -(steps / 10) * 5.787 ps
9823  */
9824 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
9825 {
9826         u32 tmp;
9827         int idx = BEND_IDX(steps);
9828
9829         if (WARN_ON(steps % 5 != 0))
9830                 return;
9831
9832         if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
9833                 return;
9834
9835         mutex_lock(&dev_priv->sb_lock);
9836
9837         if (steps % 10 != 0)
9838                 tmp = 0xAAAAAAAB;
9839         else
9840                 tmp = 0x00000000;
9841         intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
9842
9843         tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
9844         tmp &= 0xffff0000;
9845         tmp |= sscdivintphase[idx];
9846         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
9847
9848         mutex_unlock(&dev_priv->sb_lock);
9849 }
9850
9851 #undef BEND_IDX
9852
9853 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
9854 {
9855         u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
9856         u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
9857
9858         if ((ctl & SPLL_PLL_ENABLE) == 0)
9859                 return false;
9860
9861         if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
9862             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9863                 return true;
9864
9865         if (IS_BROADWELL(dev_priv) &&
9866             (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
9867                 return true;
9868
9869         return false;
9870 }
9871
9872 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
9873                                enum intel_dpll_id id)
9874 {
9875         u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
9876         u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
9877
9878         if ((ctl & WRPLL_PLL_ENABLE) == 0)
9879                 return false;
9880
9881         if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
9882                 return true;
9883
9884         if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
9885             (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
9886             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9887                 return true;
9888
9889         return false;
9890 }
9891
9892 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
9893 {
9894         struct intel_encoder *encoder;
9895         bool has_fdi = false;
9896
9897         for_each_intel_encoder(&dev_priv->drm, encoder) {
9898                 switch (encoder->type) {
9899                 case INTEL_OUTPUT_ANALOG:
9900                         has_fdi = true;
9901                         break;
9902                 default:
9903                         break;
9904                 }
9905         }
9906
9907         /*
9908          * The BIOS may have decided to use the PCH SSC
9909          * reference so we must not disable it until the
9910          * relevant PLLs have stopped relying on it. We'll
9911          * just leave the PCH SSC reference enabled in case
9912          * any active PLL is using it. It will get disabled
9913          * after runtime suspend if we don't have FDI.
9914          *
9915          * TODO: Move the whole reference clock handling
9916          * to the modeset sequence proper so that we can
9917          * actually enable/disable/reconfigure these things
9918          * safely. To do that we need to introduce a real
9919          * clock hierarchy. That would also allow us to do
9920          * clock bending finally.
9921          */
9922         dev_priv->pch_ssc_use = 0;
9923
9924         if (spll_uses_pch_ssc(dev_priv)) {
9925                 drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
9926                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
9927         }
9928
9929         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
9930                 drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
9931                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
9932         }
9933
9934         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
9935                 drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
9936                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
9937         }
9938
9939         if (dev_priv->pch_ssc_use)
9940                 return;
9941
9942         if (has_fdi) {
9943                 lpt_bend_clkout_dp(dev_priv, 0);
9944                 lpt_enable_clkout_dp(dev_priv, true, true);
9945         } else {
9946                 lpt_disable_clkout_dp(dev_priv);
9947         }
9948 }
9949
9950 /*
9951  * Initialize reference clocks when the driver loads
9952  */
9953 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
9954 {
9955         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
9956                 ilk_init_pch_refclk(dev_priv);
9957         else if (HAS_PCH_LPT(dev_priv))
9958                 lpt_init_pch_refclk(dev_priv);
9959 }
9960
/*
 * Program PIPECONF for an ILK-style pipe from the computed crtc state:
 * bpc, dithering, interlace mode, color range/colorspace and gamma mode.
 */
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = 0;

	/* pipe_bpp counts all three channels: 18/24/30/36 -> 6/8/10/12 bpc */
	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	WARN_ON(crtc_state->limited_color_range &&
		crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	if (crtc_state->limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	/* any non-RGB output format gets the BT.709 YUV colorspace bit */
	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;

	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	val |= PIPECONF_FRAME_START_DELAY(0);

	intel_de_write(dev_priv, PIPECONF(pipe), val);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));
}
10016
10017 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
10018 {
10019         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
10020         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10021         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
10022         u32 val = 0;
10023
10024         if (IS_HASWELL(dev_priv) && crtc_state->dither)
10025                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
10026
10027         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
10028                 val |= PIPECONF_INTERLACED_ILK;
10029         else
10030                 val |= PIPECONF_PROGRESSIVE;
10031
10032         if (IS_HASWELL(dev_priv) &&
10033             crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
10034                 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
10035
10036         intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
10037         intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
10038 }
10039
/*
 * Program the BDW+ PIPEMISC register: dither bpc/type, YUV output
 * selection and (gen11+) HDR precision mode.
 */
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;

	/* pipe_bpp counts all three channels: 18/24/30/36 -> 6/8/10/12 bpc */
	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_DITHER_6_BPC;
		break;
	case 24:
		val |= PIPEMISC_DITHER_8_BPC;
		break;
	case 30:
		val |= PIPEMISC_DITHER_10_BPC;
		break;
	case 36:
		val |= PIPEMISC_DITHER_12_BPC;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	/* both 4:4:4 and 4:2:0 output set the generic YUV colorspace bit */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	/* 4:2:0 additionally enables the YUV420 full blend pipe mode */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	/*
	 * gen11+: use extended HDR precision when every active plane
	 * other than the cursor is an HDR-capable plane.
	 */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
					   BIT(PLANE_CURSOR))) == 0)
		val |= PIPEMISC_HDR_MODE_PRECISION;

	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
10082
10083 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
10084 {
10085         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10086         u32 tmp;
10087
10088         tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
10089
10090         switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
10091         case PIPEMISC_DITHER_6_BPC:
10092                 return 18;
10093         case PIPEMISC_DITHER_8_BPC:
10094                 return 24;
10095         case PIPEMISC_DITHER_10_BPC:
10096                 return 30;
10097         case PIPEMISC_DITHER_12_BPC:
10098                 return 36;
10099         default:
10100                 MISSING_CASE(tmp);
10101                 return 0;
10102         }
10103 }
10104
10105 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
10106 {
10107         /*
10108          * Account for spread spectrum to avoid
10109          * oversubscribing the link. Max center spread
10110          * is 2.5%; use 5% for safety's sake.
10111          */
10112         u32 bps = target_clock * bpp * 21 / 20;
10113         return DIV_ROUND_UP(bps, link_bw * 8);
10114 }
10115
10116 static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
10117 {
10118         return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
10119 }
10120
/*
 * Translate the precomputed divider values in @crtc_state->dpll into
 * DPLL/FP0/FP1 register values, stored in @crtc_state->dpll_hw_state.
 * @reduced_clock, if non-NULL, supplies alternate dividers for FP1.
 */
static void ilk_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *crtc_state,
			     struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) &&
		     intel_is_dual_link_lvds(dev_priv)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock) {
		factor = 20;
	}

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	/* FP1 mirrors FP0 unless distinct reduced clock dividers were given */
	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* SSC reference only for LVDS panels that want it */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
10222
10223 static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
10224                                   struct intel_crtc_state *crtc_state)
10225 {
10226         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10227         struct intel_atomic_state *state =
10228                 to_intel_atomic_state(crtc_state->uapi.state);
10229         const struct intel_limit *limit;
10230         int refclk = 120000;
10231
10232         memset(&crtc_state->dpll_hw_state, 0,
10233                sizeof(crtc_state->dpll_hw_state));
10234
10235         /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
10236         if (!crtc_state->has_pch_encoder)
10237                 return 0;
10238
10239         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
10240                 if (intel_panel_use_ssc(dev_priv)) {
10241                         drm_dbg_kms(&dev_priv->drm,
10242                                     "using SSC reference clock of %d kHz\n",
10243                                     dev_priv->vbt.lvds_ssc_freq);
10244                         refclk = dev_priv->vbt.lvds_ssc_freq;
10245                 }
10246
10247                 if (intel_is_dual_link_lvds(dev_priv)) {
10248                         if (refclk == 100000)
10249                                 limit = &ilk_limits_dual_lvds_100m;
10250                         else
10251                                 limit = &ilk_limits_dual_lvds;
10252                 } else {
10253                         if (refclk == 100000)
10254                                 limit = &ilk_limits_single_lvds_100m;
10255                         else
10256                                 limit = &ilk_limits_single_lvds;
10257                 }
10258         } else {
10259                 limit = &ilk_limits_dac;
10260         }
10261
10262         if (!crtc_state->clock_set &&
10263             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
10264                                 refclk, NULL, &crtc_state->dpll)) {
10265                 drm_err(&dev_priv->drm,
10266                         "Couldn't find PLL settings for mode!\n");
10267                 return -EINVAL;
10268         }
10269
10270         ilk_compute_dpll(crtc, crtc_state, NULL);
10271
10272         if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
10273                 drm_dbg_kms(&dev_priv->drm,
10274                             "failed to find PLL for pipe %c\n",
10275                             pipe_name(crtc->pipe));
10276                 return -EINVAL;
10277         }
10278
10279         return 0;
10280 }
10281
10282 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
10283                                          struct intel_link_m_n *m_n)
10284 {
10285         struct drm_device *dev = crtc->base.dev;
10286         struct drm_i915_private *dev_priv = to_i915(dev);
10287         enum pipe pipe = crtc->pipe;
10288
10289         m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
10290         m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
10291         m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
10292                 & ~TU_SIZE_MASK;
10293         m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
10294         m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
10295                     & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
10296 }
10297
/*
 * Read back the link M/N values for @transcoder. Gen5+ uses the
 * transcoder-indexed registers and, when @m2_n2 is non-NULL and the
 * transcoder has them, also reads the alternate M2/N2 set; older
 * hardware only has the G4X-style pipe-indexed registers.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = intel_de_read(dev_priv,
					    PIPE_LINK_M1(transcoder));
		m_n->link_n = intel_de_read(dev_priv,
					    PIPE_LINK_N1(transcoder));
		m_n->gmch_m = intel_de_read(dev_priv,
					    PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv,
					    PIPE_DATA_N1(transcoder));
		/* the TU size shares the DATA_M1 register with gmch_m */
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = intel_de_read(dev_priv,
						      PIPE_LINK_M2(transcoder));
			m2_n2->link_n = intel_de_read(dev_priv,
							     PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = intel_de_read(dev_priv,
							     PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = intel_de_read(dev_priv,
							     PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
		m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
10342
10343 void intel_dp_get_m_n(struct intel_crtc *crtc,
10344                       struct intel_crtc_state *pipe_config)
10345 {
10346         if (pipe_config->has_pch_encoder)
10347                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
10348         else
10349                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
10350                                              &pipe_config->dp_m_n,
10351                                              &pipe_config->dp_m2_n2);
10352 }
10353
/* Read back the FDI link M/N values for the crtc's cpu transcoder. */
static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
10360
10361 static void skl_get_pfit_config(struct intel_crtc *crtc,
10362                                 struct intel_crtc_state *pipe_config)
10363 {
10364         struct drm_device *dev = crtc->base.dev;
10365         struct drm_i915_private *dev_priv = to_i915(dev);
10366         struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
10367         u32 ps_ctrl = 0;
10368         int id = -1;
10369         int i;
10370
10371         /* find scaler attached to this pipe */
10372         for (i = 0; i < crtc->num_scalers; i++) {
10373                 ps_ctrl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
10374                 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
10375                         id = i;
10376                         pipe_config->pch_pfit.enabled = true;
10377                         pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
10378                                                                   SKL_PS_WIN_POS(crtc->pipe, i));
10379                         pipe_config->pch_pfit.size = intel_de_read(dev_priv,
10380                                                                    SKL_PS_WIN_SZ(crtc->pipe, i));
10381                         scaler_state->scalers[i].in_use = true;
10382                         break;
10383                 }
10384         }
10385
10386         scaler_state->scaler_id = id;
10387         if (id >= 0) {
10388                 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
10389         } else {
10390                 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
10391         }
10392 }
10393
/*
 * Read back the primary plane's programmed hardware state (typically
 * set up by the BIOS/GOP) and fill in @plane_config, including a
 * freshly allocated intel_framebuffer describing the scanout buffer.
 * Bails out silently if the plane is disabled or allocation fails;
 * on an unknown tiling mode the partially-filled fb is freed.
 */
static void
skl_get_initial_plane_config(struct intel_crtc *crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* nothing to record if the plane is not enabled */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id));

	/* the pixel format field widened on gen11+ */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* alpha mode moved from PLANE_CTL to PLANE_COLOR_CTL on GLK/gen10+ */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = intel_de_read(dev_priv,
				      PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/* translate hw tiling + compression bits into a fb modifier */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		plane_config->tiling = I915_TILING_Y;
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
				I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
				I915_FORMAT_MOD_Y_TILED_CCS;
		else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, thats why this swapping.
	 */
	switch (val & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_0:
		plane_config->rotation = DRM_MODE_ROTATE_0;
		break;
	case PLANE_CTL_ROTATE_90:
		plane_config->rotation = DRM_MODE_ROTATE_270;
		break;
	case PLANE_CTL_ROTATE_180:
		plane_config->rotation = DRM_MODE_ROTATE_180;
		break;
	case PLANE_CTL_ROTATE_270:
		plane_config->rotation = DRM_MODE_ROTATE_90;
		break;
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    val & PLANE_CTL_FLIP_HORIZONTAL)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	/* surface base address; the low 12 bits are not address bits */
	base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id));

	/* PLANE_SIZE packs (height-1) << 16 | (width-1) */
	val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xffff) + 1;
	fb->width = ((val >> 0) & 0xffff) + 1;

	/* the stride register value is scaled by a format-dependent multiplier */
	val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id));
	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	drm_dbg_kms(&dev_priv->drm,
		    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		    crtc->base.name, plane->base.name, fb->width, fb->height,
		    fb->format->cpp[0] * 8, base, fb->pitches[0],
		    plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
10526
/* Read back the ILK-style panel fitter state for @crtc. */
static void ilk_get_pfit_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
							  PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = intel_de_read(dev_priv,
							   PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignments of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiates them) so just WARN about this case for now. */
		if (IS_GEN(dev_priv, 7)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}
10552
10553 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
10554                                 struct intel_crtc_state *pipe_config)
10555 {
10556         struct drm_device *dev = crtc->base.dev;
10557         struct drm_i915_private *dev_priv = to_i915(dev);
10558         enum intel_display_power_domain power_domain;
10559         intel_wakeref_t wakeref;
10560         u32 tmp;
10561         bool ret;
10562
10563         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10564         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10565         if (!wakeref)
10566                 return false;
10567
10568         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
10569         pipe_config->shared_dpll = NULL;
10570         pipe_config->master_transcoder = INVALID_TRANSCODER;
10571
10572         ret = false;
10573         tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
10574         if (!(tmp & PIPECONF_ENABLE))
10575                 goto out;
10576
10577         switch (tmp & PIPECONF_BPC_MASK) {
10578         case PIPECONF_6BPC:
10579                 pipe_config->pipe_bpp = 18;
10580                 break;
10581         case PIPECONF_8BPC:
10582                 pipe_config->pipe_bpp = 24;
10583                 break;
10584         case PIPECONF_10BPC:
10585                 pipe_config->pipe_bpp = 30;
10586                 break;
10587         case PIPECONF_12BPC:
10588                 pipe_config->pipe_bpp = 36;
10589                 break;
10590         default:
10591                 break;
10592         }
10593
10594         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
10595                 pipe_config->limited_color_range = true;
10596
10597         switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
10598         case PIPECONF_OUTPUT_COLORSPACE_YUV601:
10599         case PIPECONF_OUTPUT_COLORSPACE_YUV709:
10600                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
10601                 break;
10602         default:
10603                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
10604                 break;
10605         }
10606
10607         pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
10608                 PIPECONF_GAMMA_MODE_SHIFT;
10609
10610         pipe_config->csc_mode = intel_de_read(dev_priv,
10611                                               PIPE_CSC_MODE(crtc->pipe));
10612
10613         i9xx_get_pipe_color_config(pipe_config);
10614         intel_color_get_config(pipe_config);
10615
10616         if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
10617                 struct intel_shared_dpll *pll;
10618                 enum intel_dpll_id pll_id;
10619
10620                 pipe_config->has_pch_encoder = true;
10621
10622                 tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
10623                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10624                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
10625
10626                 ilk_get_fdi_m_n_config(crtc, pipe_config);
10627
10628                 if (HAS_PCH_IBX(dev_priv)) {
10629                         /*
10630                          * The pipe->pch transcoder and pch transcoder->pll
10631                          * mapping is fixed.
10632                          */
10633                         pll_id = (enum intel_dpll_id) crtc->pipe;
10634                 } else {
10635                         tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
10636                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
10637                                 pll_id = DPLL_ID_PCH_PLL_B;
10638                         else
10639                                 pll_id= DPLL_ID_PCH_PLL_A;
10640                 }
10641
10642                 pipe_config->shared_dpll =
10643                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
10644                 pll = pipe_config->shared_dpll;
10645
10646                 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
10647                                                 &pipe_config->dpll_hw_state));
10648
10649                 tmp = pipe_config->dpll_hw_state.dpll;
10650                 pipe_config->pixel_multiplier =
10651                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
10652                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
10653
10654                 ilk_pch_clock_get(crtc, pipe_config);
10655         } else {
10656                 pipe_config->pixel_multiplier = 1;
10657         }
10658
10659         intel_get_pipe_timings(crtc, pipe_config);
10660         intel_get_pipe_src_size(crtc, pipe_config);
10661
10662         ilk_get_pfit_config(crtc, pipe_config);
10663
10664         ret = true;
10665
10666 out:
10667         intel_display_power_put(dev_priv, power_domain, wakeref);
10668
10669         return ret;
10670 }
10671
10672 static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
10673                                   struct intel_crtc_state *crtc_state)
10674 {
10675         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10676         struct intel_atomic_state *state =
10677                 to_intel_atomic_state(crtc_state->uapi.state);
10678
10679         if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
10680             INTEL_GEN(dev_priv) >= 11) {
10681                 struct intel_encoder *encoder =
10682                         intel_get_crtc_new_encoder(state, crtc_state);
10683
10684                 if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
10685                         drm_dbg_kms(&dev_priv->drm,
10686                                     "failed to find PLL for pipe %c\n",
10687                                     pipe_name(crtc->pipe));
10688                         return -EINVAL;
10689                 }
10690         }
10691
10692         return 0;
10693 }
10694
10695 static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
10696                             struct intel_crtc_state *pipe_config)
10697 {
10698         enum intel_dpll_id id;
10699         u32 temp;
10700
10701         temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
10702         id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
10703
10704         if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
10705                 return;
10706
10707         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10708 }
10709
/*
 * Read out which DPLL is driving @port on ICL+ and record it in the
 * matching icl_port_dplls[] slot of @pipe_config, marking it active.
 */
static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
                            struct intel_crtc_state *pipe_config)
{
        enum phy phy = intel_port_to_phy(dev_priv, port);
        enum icl_port_dpll_id port_dpll_id;
        enum intel_dpll_id id;
        u32 temp;

        if (intel_phy_is_combo(dev_priv, phy)) {
                /* Combo PHY: PLL selection lives in DPCLKA_CFGCR0. */
                temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) &
                        ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
                id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
                port_dpll_id = ICL_PORT_DPLL_DEFAULT;
        } else if (intel_phy_is_tc(dev_priv, phy)) {
                /* Type-C PHY: either the per-port MG PLL or the shared TBT PLL. */
                u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;

                if (clk_sel == DDI_CLK_SEL_MG) {
                        id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
                                                                    port));
                        port_dpll_id = ICL_PORT_DPLL_MG_PHY;
                } else {
                        /* Anything else must be one of the TBT clock selects. */
                        WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
                        id = DPLL_ID_ICL_TBTPLL;
                        port_dpll_id = ICL_PORT_DPLL_DEFAULT;
                }
        } else {
                WARN(1, "Invalid port %x\n", port);
                return;
        }

        pipe_config->icl_port_dplls[port_dpll_id].pll =
                intel_get_shared_dpll_by_id(dev_priv, id);

        icl_set_active_port_dpll(pipe_config, port_dpll_id);
}
10745
10746 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
10747                                 enum port port,
10748                                 struct intel_crtc_state *pipe_config)
10749 {
10750         enum intel_dpll_id id;
10751
10752         switch (port) {
10753         case PORT_A:
10754                 id = DPLL_ID_SKL_DPLL0;
10755                 break;
10756         case PORT_B:
10757                 id = DPLL_ID_SKL_DPLL1;
10758                 break;
10759         case PORT_C:
10760                 id = DPLL_ID_SKL_DPLL2;
10761                 break;
10762         default:
10763                 drm_err(&dev_priv->drm, "Incorrect port type\n");
10764                 return;
10765         }
10766
10767         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10768 }
10769
10770 static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
10771                             struct intel_crtc_state *pipe_config)
10772 {
10773         enum intel_dpll_id id;
10774         u32 temp;
10775
10776         temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
10777         id = temp >> (port * 3 + 1);
10778
10779         if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
10780                 return;
10781
10782         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10783 }
10784
10785 static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
10786                             struct intel_crtc_state *pipe_config)
10787 {
10788         enum intel_dpll_id id;
10789         u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port));
10790
10791         switch (ddi_pll_sel) {
10792         case PORT_CLK_SEL_WRPLL1:
10793                 id = DPLL_ID_WRPLL1;
10794                 break;
10795         case PORT_CLK_SEL_WRPLL2:
10796                 id = DPLL_ID_WRPLL2;
10797                 break;
10798         case PORT_CLK_SEL_SPLL:
10799                 id = DPLL_ID_SPLL;
10800                 break;
10801         case PORT_CLK_SEL_LCPLL_810:
10802                 id = DPLL_ID_LCPLL_810;
10803                 break;
10804         case PORT_CLK_SEL_LCPLL_1350:
10805                 id = DPLL_ID_LCPLL_1350;
10806                 break;
10807         case PORT_CLK_SEL_LCPLL_2700:
10808                 id = DPLL_ID_LCPLL_2700;
10809                 break;
10810         default:
10811                 MISSING_CASE(ddi_pll_sel);
10812                 /* fall through */
10813         case PORT_CLK_SEL_NONE:
10814                 return;
10815         }
10816
10817         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10818 }
10819
/*
 * Determine which cpu transcoder is driving @crtc (including the eDP and
 * DSI panel transcoders, which are not mapped 1:1 to pipes), grab a
 * wakeref on that transcoder's power domain, and report whether the
 * transcoder is enabled.
 *
 * On success the acquired wakeref is stored in @wakerefs and the domain
 * bit is set in @power_domain_mask; the caller releases them. Returns
 * false if the transcoder's power domain is off.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config,
                                     u64 *power_domain_mask,
                                     intel_wakeref_t *wakerefs)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        unsigned long panel_transcoder_mask = 0;
        unsigned long enabled_panel_transcoders = 0;
        enum transcoder panel_transcoder;
        intel_wakeref_t wf;
        u32 tmp;

        if (INTEL_GEN(dev_priv) >= 11)
                panel_transcoder_mask |=
                        BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

        if (HAS_TRANSCODER_EDP(dev_priv))
                panel_transcoder_mask |= BIT(TRANSCODER_EDP);

        /*
         * The pipe->transcoder mapping is fixed with the exception of the eDP
         * and DSI transcoders handled below.
         */
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

        /*
         * XXX: Do intel_display_power_get_if_enabled before reading this (for
         * consistency and less surprising code; it's in always on power).
         */
        for_each_set_bit(panel_transcoder,
                         &panel_transcoder_mask,
                         ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
                bool force_thru = false;
                enum pipe trans_pipe;

                tmp = intel_de_read(dev_priv,
                                    TRANS_DDI_FUNC_CTL(panel_transcoder));
                if (!(tmp & TRANS_DDI_FUNC_ENABLE))
                        continue;

                /*
                 * Log all enabled ones, only use the first one.
                 *
                 * FIXME: This won't work for two separate DSI displays.
                 */
                enabled_panel_transcoders |= BIT(panel_transcoder);
                if (enabled_panel_transcoders != BIT(panel_transcoder))
                        continue;

                /* Decode which pipe this panel transcoder is attached to. */
                switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
                default:
                        WARN(1, "unknown pipe linked to transcoder %s\n",
                             transcoder_name(panel_transcoder));
                        /* fall through */
                case TRANS_DDI_EDP_INPUT_A_ONOFF:
                        force_thru = true;
                        /* fall through */
                case TRANS_DDI_EDP_INPUT_A_ON:
                        trans_pipe = PIPE_A;
                        break;
                case TRANS_DDI_EDP_INPUT_B_ONOFF:
                        trans_pipe = PIPE_B;
                        break;
                case TRANS_DDI_EDP_INPUT_C_ONOFF:
                        trans_pipe = PIPE_C;
                        break;
                case TRANS_DDI_EDP_INPUT_D_ONOFF:
                        trans_pipe = PIPE_D;
                        break;
                }

                /* Override the default mapping if a panel transcoder feeds us. */
                if (trans_pipe == crtc->pipe) {
                        pipe_config->cpu_transcoder = panel_transcoder;
                        pipe_config->pch_pfit.force_thru = force_thru;
                }
        }

        /*
         * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
         */
        WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
                enabled_panel_transcoders != BIT(TRANSCODER_EDP));

        power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
        WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

        wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wf)
                return false;

        /* Caller owns this wakeref from here on; record it for release. */
        wakerefs[power_domain] = wf;
        *power_domain_mask |= BIT_ULL(power_domain);

        tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));

        return tmp & PIPECONF_ENABLE;
}
10919
/*
 * Check whether a BXT/GLK DSI transcoder is driving @crtc. Walks both
 * DSI ports, taking a wakeref on each port's transcoder power domain
 * (recorded in @wakerefs / @power_domain_mask for the caller to release),
 * and sets pipe_config->cpu_transcoder on a match.
 *
 * Returns true iff the crtc ends up assigned to a DSI transcoder.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
                                         struct intel_crtc_state *pipe_config,
                                         u64 *power_domain_mask,
                                         intel_wakeref_t *wakerefs)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        enum transcoder cpu_transcoder;
        intel_wakeref_t wf;
        enum port port;
        u32 tmp;

        for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
                if (port == PORT_A)
                        cpu_transcoder = TRANSCODER_DSI_A;
                else
                        cpu_transcoder = TRANSCODER_DSI_C;

                power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
                WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

                wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
                if (!wf)
                        continue;

                wakerefs[power_domain] = wf;
                *power_domain_mask |= BIT_ULL(power_domain);

                /*
                 * The PLL needs to be enabled with a valid divider
                 * configuration, otherwise accessing DSI registers will hang
                 * the machine. See BSpec North Display Engine
                 * registers/MIPI[BXT]. We can break out here early, since we
                 * need the same DSI PLL to be enabled for both DSI ports.
                 */
                if (!bxt_dsi_pll_is_enabled(dev_priv))
                        break;

                /* XXX: this works for video mode only */
                tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
                if (!(tmp & DPI_ENABLE))
                        continue;

                /* Skip ports whose transcoder is routed to a different pipe. */
                tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
                if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
                        continue;

                pipe_config->cpu_transcoder = cpu_transcoder;
                break;
        }

        return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
10974
/*
 * Read out which DDI port and shared DPLL the crtc's transcoder is using,
 * including the PLL hw state, and detect an active FDI/PCH encoder on
 * HSW/BDW (DDI E).
 */
static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        struct intel_shared_dpll *pll;
        enum port port;
        u32 tmp;

        if (transcoder_is_dsi(cpu_transcoder)) {
                /* DSI transcoder->port mapping is fixed. */
                port = (cpu_transcoder == TRANSCODER_DSI_A) ?
                                                PORT_A : PORT_B;
        } else {
                tmp = intel_de_read(dev_priv,
                                    TRANS_DDI_FUNC_CTL(cpu_transcoder));
                if (INTEL_GEN(dev_priv) >= 12)
                        port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
                else
                        port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
        }

        /* Platform-specific DPLL readout for the port found above. */
        if (INTEL_GEN(dev_priv) >= 11)
                icl_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_CANNONLAKE(dev_priv))
                cnl_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_GEN9_BC(dev_priv))
                skl_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_GEN9_LP(dev_priv))
                bxt_get_ddi_pll(dev_priv, port, pipe_config);
        else
                hsw_get_ddi_pll(dev_priv, port, pipe_config);

        pll = pipe_config->shared_dpll;
        if (pll) {
                WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
                                                &pipe_config->dpll_hw_state));
        }

        /*
         * Haswell has only one FDI/PCH transcoder (A), which is connected to
         * DDI E. So just check whether this pipe is wired to DDI E and whether
         * the PCH transcoder is on.
         */
        if (INTEL_GEN(dev_priv) < 9 &&
            (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
                pipe_config->has_pch_encoder = true;

                tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
                pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
                                          FDI_DP_PORT_WIDTH_SHIFT) + 1;

                ilk_get_fdi_m_n_config(crtc, pipe_config);
        }
}
11029
11030 static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv,
11031                                                  enum transcoder cpu_transcoder)
11032 {
11033         u32 trans_port_sync, master_select;
11034
11035         trans_port_sync = intel_de_read(dev_priv,
11036                                         TRANS_DDI_FUNC_CTL2(cpu_transcoder));
11037
11038         if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
11039                 return INVALID_TRANSCODER;
11040
11041         master_select = trans_port_sync &
11042                         PORT_SYNC_MODE_MASTER_SELECT_MASK;
11043         if (master_select == 0)
11044                 return TRANSCODER_EDP;
11045         else
11046                 return master_select - 1;
11047 }
11048
/*
 * Read out the transcoder port sync configuration: record this crtc's
 * master transcoder (if any) and build the mask of slave transcoders
 * that have this crtc's transcoder selected as their master.
 */
static void icl_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
        u32 transcoders;
        enum transcoder cpu_transcoder;

        crtc_state->master_transcoder = transcoder_master_readout(dev_priv,
                                                                  crtc_state->cpu_transcoder);

        transcoders = BIT(TRANSCODER_A) |
                BIT(TRANSCODER_B) |
                BIT(TRANSCODER_C) |
                BIT(TRANSCODER_D);
        for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
                enum intel_display_power_domain power_domain;
                intel_wakeref_t trans_wakeref;

                /* Only probe transcoders whose power domain is already up. */
                power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
                trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
                                                                   power_domain);

                if (!trans_wakeref)
                        continue;

                if (transcoder_master_readout(dev_priv, cpu_transcoder) ==
                    crtc_state->cpu_transcoder)
                        crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);

                intel_display_power_put(dev_priv, power_domain, trans_wakeref);
        }

        /* A transcoder cannot be both a sync slave and a sync master. */
        WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER &&
                crtc_state->sync_mode_slaves_mask);
}
11083
/*
 * hsw_get_pipe_config - read out the hw state of a HSW+ pipe
 * @crtc: the crtc to read out
 * @pipe_config: crtc state to fill in from the hardware
 *
 * Returns true if the pipe is active. Collects wakerefs for every power
 * domain it touches in wakerefs[]/power_domain_mask and releases them
 * all before returning.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
        enum intel_display_power_domain power_domain;
        u64 power_domain_mask;
        bool active;
        u32 tmp;

        pipe_config->master_transcoder = INVALID_TRANSCODER;

        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wf)
                return false;

        wakerefs[power_domain] = wf;
        power_domain_mask = BIT_ULL(power_domain);

        pipe_config->shared_dpll = NULL;

        active = hsw_get_transcoder_state(crtc, pipe_config,
                                          &power_domain_mask, wakerefs);

        /* On BXT/GLK a DSI transcoder may drive the pipe instead. */
        if (IS_GEN9_LP(dev_priv) &&
            bxt_get_dsi_transcoder_state(crtc, pipe_config,
                                         &power_domain_mask, wakerefs)) {
                WARN_ON(active);
                active = true;
        }

        if (!active)
                goto out;

        if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
            INTEL_GEN(dev_priv) >= 11) {
                hsw_get_ddi_port_state(crtc, pipe_config);
                intel_get_pipe_timings(crtc, pipe_config);
        }

        intel_get_pipe_src_size(crtc, pipe_config);

        if (IS_HASWELL(dev_priv)) {
                u32 tmp = intel_de_read(dev_priv,
                                        PIPECONF(pipe_config->cpu_transcoder));

                if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
                        pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
                else
                        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        } else {
                pipe_config->output_format =
                        bdw_get_pipemisc_output_format(crtc);

                /*
                 * Currently there is no interface defined to
                 * check user preference between RGB/YCBCR444
                 * or YCBCR420. So the only possible case for
                 * YCBCR444 usage is driving YCBCR420 output
                 * with LSPCON, when pipe is configured for
                 * YCBCR444 output and LSPCON takes care of
                 * downsampling it.
                 */
                pipe_config->lspcon_downsampling =
                        pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
        }

        pipe_config->gamma_mode = intel_de_read(dev_priv,
                                                GAMMA_MODE(crtc->pipe));

        pipe_config->csc_mode = intel_de_read(dev_priv,
                                              PIPE_CSC_MODE(crtc->pipe));

        if (INTEL_GEN(dev_priv) >= 9) {
                tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

                if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
                        pipe_config->gamma_enable = true;

                if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
                        pipe_config->csc_enable = true;
        } else {
                i9xx_get_pipe_color_config(pipe_config);
        }

        intel_color_get_config(pipe_config);

        tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
        pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
        if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
                pipe_config->ips_linetime =
                        REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

        /* The panel fitter is in a separately gated power well. */
        power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
        WARN_ON(power_domain_mask & BIT_ULL(power_domain));

        wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (wf) {
                wakerefs[power_domain] = wf;
                power_domain_mask |= BIT_ULL(power_domain);

                if (INTEL_GEN(dev_priv) >= 9)
                        skl_get_pfit_config(crtc, pipe_config);
                else
                        ilk_get_pfit_config(crtc, pipe_config);
        }

        if (hsw_crtc_supports_ips(crtc)) {
                if (IS_HASWELL(dev_priv))
                        pipe_config->ips_enabled = intel_de_read(dev_priv,
                                                                 IPS_CTL) & IPS_ENABLE;
                else {
                        /*
                         * We cannot readout IPS state on broadwell, set to
                         * true so we can set it to a defined state on first
                         * commit.
                         */
                        pipe_config->ips_enabled = true;
                }
        }

        if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
            !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
                pipe_config->pixel_multiplier =
                        intel_de_read(dev_priv,
                                      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
        } else {
                pipe_config->pixel_multiplier = 1;
        }

        if (INTEL_GEN(dev_priv) >= 11 &&
            !transcoder_is_dsi(pipe_config->cpu_transcoder))
                icl_get_trans_port_sync_config(pipe_config);

out:
        /* Drop every wakeref acquired during the readout. */
        for_each_power_domain(power_domain, power_domain_mask)
                intel_display_power_put(dev_priv,
                                        power_domain, wakerefs[power_domain]);

        return active;
}
11226
11227 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
11228 {
11229         struct drm_i915_private *dev_priv =
11230                 to_i915(plane_state->uapi.plane->dev);
11231         const struct drm_framebuffer *fb = plane_state->hw.fb;
11232         const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11233         u32 base;
11234
11235         if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
11236                 base = obj->phys_handle->busaddr;
11237         else
11238                 base = intel_plane_ggtt_offset(plane_state);
11239
11240         return base + plane_state->color_plane[0].offset;
11241 }
11242
11243 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
11244 {
11245         int x = plane_state->uapi.dst.x1;
11246         int y = plane_state->uapi.dst.y1;
11247         u32 pos = 0;
11248
11249         if (x < 0) {
11250                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
11251                 x = -x;
11252         }
11253         pos |= x << CURSOR_X_SHIFT;
11254
11255         if (y < 0) {
11256                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
11257                 y = -y;
11258         }
11259         pos |= y << CURSOR_Y_SHIFT;
11260
11261         return pos;
11262 }
11263
11264 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
11265 {
11266         const struct drm_mode_config *config =
11267                 &plane_state->uapi.plane->dev->mode_config;
11268         int width = drm_rect_width(&plane_state->uapi.dst);
11269         int height = drm_rect_height(&plane_state->uapi.dst);
11270
11271         return width > 0 && width <= config->cursor_width &&
11272                 height > 0 && height <= config->cursor_height;
11273 }
11274
/*
 * Validate and finalize the cursor surface: pin the GTT mapping, compute
 * the plane offset, reject panning (cursors must start at the fb origin),
 * and apply the 180° rotation offset adjustment on GMCH platforms.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->uapi.plane->dev);
        unsigned int rotation = plane_state->hw.rotation;
        int src_x, src_y;
        u32 offset;
        int ret;

        ret = intel_plane_compute_gtt(plane_state);
        if (ret)
                return ret;

        /* Nothing more to do for an invisible plane. */
        if (!plane_state->uapi.visible)
                return 0;

        src_x = plane_state->uapi.src.x1 >> 16;
        src_y = plane_state->uapi.src.y1 >> 16;

        intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
        offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
                                                    plane_state, 0);

        /* Cursors can't scan out from an arbitrary offset within the fb. */
        if (src_x != 0 || src_y != 0) {
                drm_dbg_kms(&dev_priv->drm,
                            "Arbitrary cursor panning not supported\n");
                return -EINVAL;
        }

        /*
         * Put the final coordinates back so that the src
         * coordinate checks will see the right values.
         */
        drm_rect_translate_to(&plane_state->uapi.src,
                              src_x << 16, src_y << 16);

        /* ILK+ do this automagically in hardware */
        if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
                const struct drm_framebuffer *fb = plane_state->hw.fb;
                int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
                int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

                /* Point at the last pixel so the hw scans out backwards. */
                offset += (src_h * src_w - 1) * fb->format->cpp[0];
        }

        plane_state->color_plane[0].offset = offset;
        plane_state->color_plane[0].x = src_x;
        plane_state->color_plane[0].y = src_y;

        return 0;
}
11326
11327 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
11328                               struct intel_plane_state *plane_state)
11329 {
11330         const struct drm_framebuffer *fb = plane_state->hw.fb;
11331         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
11332         int ret;
11333
11334         if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
11335                 drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
11336                 return -EINVAL;
11337         }
11338
11339         ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
11340                                                   &crtc_state->uapi,
11341                                                   DRM_PLANE_HELPER_NO_SCALING,
11342                                                   DRM_PLANE_HELPER_NO_SCALING,
11343                                                   true, true);
11344         if (ret)
11345                 return ret;
11346
11347         /* Use the unclipped src/dst rectangles, which we program to hw */
11348         plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi);
11349         plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi);
11350
11351         ret = intel_cursor_check_surface(plane_state);
11352         if (ret)
11353                 return ret;
11354
11355         if (!plane_state->uapi.visible)
11356                 return 0;
11357
11358         ret = intel_plane_check_src_coordinates(plane_state);
11359         if (ret)
11360                 return ret;
11361
11362         return 0;
11363 }
11364
/*
 * Maximum cursor stride on 845g/865g. The limit is a flat 2048 bytes;
 * the pixel format, modifier and rotation arguments are ignored here.
 */
static unsigned int
i845_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return 2048;
}
11372
11373 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11374 {
11375         u32 cntl = 0;
11376
11377         if (crtc_state->gamma_enable)
11378                 cntl |= CURSOR_GAMMA_ENABLE;
11379
11380         return cntl;
11381 }
11382
11383 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
11384                            const struct intel_plane_state *plane_state)
11385 {
11386         return CURSOR_ENABLE |
11387                 CURSOR_FORMAT_ARGB |
11388                 CURSOR_STRIDE(plane_state->color_plane[0].stride);
11389 }
11390
11391 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
11392 {
11393         int width = drm_rect_width(&plane_state->uapi.dst);
11394
11395         /*
11396          * 845g/865g are only limited by the width of their cursors,
11397          * the height is arbitrary up to the precision of the register.
11398          */
11399         return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
11400 }
11401
11402 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
11403                              struct intel_plane_state *plane_state)
11404 {
11405         const struct drm_framebuffer *fb = plane_state->hw.fb;
11406         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
11407         int ret;
11408
11409         ret = intel_check_cursor(crtc_state, plane_state);
11410         if (ret)
11411                 return ret;
11412
11413         /* if we want to turn off the cursor ignore width and height */
11414         if (!fb)
11415                 return 0;
11416
11417         /* Check for which cursor types we support */
11418         if (!i845_cursor_size_ok(plane_state)) {
11419                 drm_dbg_kms(&i915->drm,
11420                             "Cursor dimension %dx%d not supported\n",
11421                             drm_rect_width(&plane_state->uapi.dst),
11422                             drm_rect_height(&plane_state->uapi.dst));
11423                 return -EINVAL;
11424         }
11425
11426         WARN_ON(plane_state->uapi.visible &&
11427                 plane_state->color_plane[0].stride != fb->pitches[0]);
11428
11429         switch (fb->pitches[0]) {
11430         case 256:
11431         case 512:
11432         case 1024:
11433         case 2048:
11434                 break;
11435         default:
11436                  drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
11437                              fb->pitches[0]);
11438                 return -EINVAL;
11439         }
11440
11441         plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
11442
11443         return 0;
11444 }
11445
/*
 * Program the i845/i865 cursor registers under the uncore lock.
 * A NULL (or invisible) @plane_state programs everything to zero,
 * i.e. disables the cursor.
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->uapi.visible) {
		unsigned int width = drm_rect_width(&plane_state->uapi.dst);
		unsigned int height = drm_rect_height(&plane_state->uapi.dst);

		cntl = plane_state->ctl |
			i845_cursor_ctl_crtc(crtc_state);

		/* CURSIZE packs height in bits 12+ and width in the low bits. */
		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		/* Disable, reprogram, then re-enable with the new cntl. */
		intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0);
		intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base);
		intel_de_write_fw(dev_priv, CURSIZE, size);
		intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
		intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl);

		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		/* Only the position changed; no full reprogram needed. */
		intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11490
/* Disable the i845/i865 cursor by programming an all-zero state. */
static void i845_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i845_update_cursor(plane, crtc_state, NULL);
}
11496
11497 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
11498                                      enum pipe *pipe)
11499 {
11500         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11501         enum intel_display_power_domain power_domain;
11502         intel_wakeref_t wakeref;
11503         bool ret;
11504
11505         power_domain = POWER_DOMAIN_PIPE(PIPE_A);
11506         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11507         if (!wakeref)
11508                 return false;
11509
11510         ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE;
11511
11512         *pipe = PIPE_A;
11513
11514         intel_display_power_put(dev_priv, power_domain, wakeref);
11515
11516         return ret;
11517 }
11518
11519 static unsigned int
11520 i9xx_cursor_max_stride(struct intel_plane *plane,
11521                        u32 pixel_format, u64 modifier,
11522                        unsigned int rotation)
11523 {
11524         return plane->base.dev->mode_config.cursor_width * 4;
11525 }
11526
11527 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11528 {
11529         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
11530         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11531         u32 cntl = 0;
11532
11533         if (INTEL_GEN(dev_priv) >= 11)
11534                 return cntl;
11535
11536         if (crtc_state->gamma_enable)
11537                 cntl = MCURSOR_GAMMA_ENABLE;
11538
11539         if (crtc_state->csc_enable)
11540                 cntl |= MCURSOR_PIPE_CSC_ENABLE;
11541
11542         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11543                 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
11544
11545         return cntl;
11546 }
11547
11548 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
11549                            const struct intel_plane_state *plane_state)
11550 {
11551         struct drm_i915_private *dev_priv =
11552                 to_i915(plane_state->uapi.plane->dev);
11553         u32 cntl = 0;
11554
11555         if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
11556                 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
11557
11558         switch (drm_rect_width(&plane_state->uapi.dst)) {
11559         case 64:
11560                 cntl |= MCURSOR_MODE_64_ARGB_AX;
11561                 break;
11562         case 128:
11563                 cntl |= MCURSOR_MODE_128_ARGB_AX;
11564                 break;
11565         case 256:
11566                 cntl |= MCURSOR_MODE_256_ARGB_AX;
11567                 break;
11568         default:
11569                 MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
11570                 return 0;
11571         }
11572
11573         if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
11574                 cntl |= MCURSOR_ROTATE_180;
11575
11576         return cntl;
11577 }
11578
11579 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
11580 {
11581         struct drm_i915_private *dev_priv =
11582                 to_i915(plane_state->uapi.plane->dev);
11583         int width = drm_rect_width(&plane_state->uapi.dst);
11584         int height = drm_rect_height(&plane_state->uapi.dst);
11585
11586         if (!intel_cursor_size_ok(plane_state))
11587                 return false;
11588
11589         /* Cursor width is limited to a few power-of-two sizes */
11590         switch (width) {
11591         case 256:
11592         case 128:
11593         case 64:
11594                 break;
11595         default:
11596                 return false;
11597         }
11598
11599         /*
11600          * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
11601          * height from 8 lines up to the cursor width, when the
11602          * cursor is not rotated. Everything else requires square
11603          * cursors.
11604          */
11605         if (HAS_CUR_FBC(dev_priv) &&
11606             plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
11607                 if (height < 8 || height > width)
11608                         return false;
11609         } else {
11610                 if (height != width)
11611                         return false;
11612         }
11613
11614         return true;
11615 }
11616
11617 static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
11618                              struct intel_plane_state *plane_state)
11619 {
11620         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
11621         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11622         const struct drm_framebuffer *fb = plane_state->hw.fb;
11623         enum pipe pipe = plane->pipe;
11624         int ret;
11625
11626         ret = intel_check_cursor(crtc_state, plane_state);
11627         if (ret)
11628                 return ret;
11629
11630         /* if we want to turn off the cursor ignore width and height */
11631         if (!fb)
11632                 return 0;
11633
11634         /* Check for which cursor types we support */
11635         if (!i9xx_cursor_size_ok(plane_state)) {
11636                 drm_dbg(&dev_priv->drm,
11637                         "Cursor dimension %dx%d not supported\n",
11638                         drm_rect_width(&plane_state->uapi.dst),
11639                         drm_rect_height(&plane_state->uapi.dst));
11640                 return -EINVAL;
11641         }
11642
11643         WARN_ON(plane_state->uapi.visible &&
11644                 plane_state->color_plane[0].stride != fb->pitches[0]);
11645
11646         if (fb->pitches[0] !=
11647             drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
11648                 drm_dbg_kms(&dev_priv->drm,
11649                             "Invalid cursor stride (%u) (cursor width %d)\n",
11650                             fb->pitches[0],
11651                             drm_rect_width(&plane_state->uapi.dst));
11652                 return -EINVAL;
11653         }
11654
11655         /*
11656          * There's something wrong with the cursor on CHV pipe C.
11657          * If it straddles the left edge of the screen then
11658          * moving it away from the edge or disabling it often
11659          * results in a pipe underrun, and often that can lead to
11660          * dead pipe (constant underrun reported, and it scans
11661          * out just a solid color). To recover from that, the
11662          * display power well must be turned off and on again.
11663          * Refuse the put the cursor into that compromised position.
11664          */
11665         if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
11666             plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
11667                 drm_dbg_kms(&dev_priv->drm,
11668                             "CHV cursor C not allowed to straddle the left screen edge\n");
11669                 return -EINVAL;
11670         }
11671
11672         plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
11673
11674         return 0;
11675 }
11676
/*
 * Program the i9xx+ cursor registers under the uncore lock.
 * A NULL (or invisible) @plane_state programs everything to zero,
 * i.e. disables the cursor. Register write order is critical here;
 * see the arming comment below.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->uapi.visible) {
		unsigned width = drm_rect_width(&plane_state->uapi.dst);
		unsigned height = drm_rect_height(&plane_state->uapi.dst);

		cntl = plane_state->ctl |
			i9xx_cursor_ctl_crtc(crtc_state);

		/* Non-square cursor implies CUR_FBC_CTL is in use. */
		if (width != height)
			fbc_ctl = CUR_FBC_CTL_EN | (height - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
	 * Without the CURCNTR write the CURPOS write would
	 * arm itself. Thus we always update CURCNTR before
	 * CURPOS.
	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
	 * a write to any of the cursor register will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * The other registers are armed by the CURBASE write
	 * except when the plane is getting enabled at which time
	 * the CURCNTR write arms the update.
	 */

	if (INTEL_GEN(dev_priv) >= 9)
		skl_write_cursor_wm(plane, crtc_state);

	if (plane->cursor.base != base ||
	    plane->cursor.size != fbc_ctl ||
	    plane->cursor.cntl != cntl) {
		if (HAS_CUR_FBC(dev_priv))
			intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe),
					  fbc_ctl);
		intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
		intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
		intel_de_write_fw(dev_priv, CURBASE(pipe), base);

		plane->cursor.base = base;
		plane->cursor.size = fbc_ctl;
		plane->cursor.cntl = cntl;
	} else {
		/* Only the position changed; CURBASE arms the update. */
		intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
		intel_de_write_fw(dev_priv, CURBASE(pipe), base);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11745
/* Disable the i9xx+ cursor by programming an all-zero state. */
static void i9xx_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i9xx_update_cursor(plane, crtc_state, NULL);
}
11751
11752 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
11753                                      enum pipe *pipe)
11754 {
11755         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11756         enum intel_display_power_domain power_domain;
11757         intel_wakeref_t wakeref;
11758         bool ret;
11759         u32 val;
11760
11761         /*
11762          * Not 100% correct for planes that can move between pipes,
11763          * but that's only the case for gen2-3 which don't have any
11764          * display power wells.
11765          */
11766         power_domain = POWER_DOMAIN_PIPE(plane->pipe);
11767         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11768         if (!wakeref)
11769                 return false;
11770
11771         val = intel_de_read(dev_priv, CURCNTR(plane->pipe));
11772
11773         ret = val & MCURSOR_MODE;
11774
11775         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
11776                 *pipe = plane->pipe;
11777         else
11778                 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
11779                         MCURSOR_PIPE_SELECT_SHIFT;
11780
11781         intel_display_power_put(dev_priv, power_domain, wakeref);
11782
11783         return ret;
11784 }
11785
/* VESA 640x480x72Hz mode to set on the pipe during load detection */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
11791
11792 struct drm_framebuffer *
11793 intel_framebuffer_create(struct drm_i915_gem_object *obj,
11794                          struct drm_mode_fb_cmd2 *mode_cmd)
11795 {
11796         struct intel_framebuffer *intel_fb;
11797         int ret;
11798
11799         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
11800         if (!intel_fb)
11801                 return ERR_PTR(-ENOMEM);
11802
11803         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
11804         if (ret)
11805                 goto err;
11806
11807         return &intel_fb->base;
11808
11809 err:
11810         kfree(intel_fb);
11811         return ERR_PTR(ret);
11812 }
11813
11814 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
11815                                         struct drm_crtc *crtc)
11816 {
11817         struct drm_plane *plane;
11818         struct drm_plane_state *plane_state;
11819         int ret, i;
11820
11821         ret = drm_atomic_add_affected_planes(state, crtc);
11822         if (ret)
11823                 return ret;
11824
11825         for_each_new_plane_in_state(state, plane, plane_state, i) {
11826                 if (plane_state->crtc != crtc)
11827                         continue;
11828
11829                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
11830                 if (ret)
11831                         return ret;
11832
11833                 drm_atomic_set_fb_for_plane(plane_state, NULL);
11834         }
11835
11836         return 0;
11837 }
11838
/*
 * Take over a pipe to drive @connector with a fixed 640x480 mode for
 * load detection. On success returns true and stashes the state to
 * restore in @old->restore_state; returns false on failure, or
 * -EDEADLK when the caller must back off and retry the locking
 * (note the int return carries both bool results and that errno).
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.id, encoder->name);

	old->restore_state = NULL;

	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		/* Skip crtcs this encoder cannot drive. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Already in use: drop the lock and keep looking. */
		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	/* One state to commit the test mode, one to restore afterwards. */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
					   &load_detect_mode);
	if (ret)
		goto fail;

	/* Run the load-detect mode with no planes enabled. */
	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/* Duplicate the current state so it can be restored later. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to create a copy of old state to restore: %i\n",
			    ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* Propagate lock contention so the caller can back off and retry. */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
11997
11998 void intel_release_load_detect_pipe(struct drm_connector *connector,
11999                                     struct intel_load_detect_pipe *old,
12000                                     struct drm_modeset_acquire_ctx *ctx)
12001 {
12002         struct intel_encoder *intel_encoder =
12003                 intel_attached_encoder(to_intel_connector(connector));
12004         struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
12005         struct drm_encoder *encoder = &intel_encoder->base;
12006         struct drm_atomic_state *state = old->restore_state;
12007         int ret;
12008
12009         drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
12010                     connector->base.id, connector->name,
12011                     encoder->base.id, encoder->name);
12012
12013         if (!state)
12014                 return;
12015
12016         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
12017         if (ret)
12018                 drm_dbg_kms(&i915->drm,
12019                             "Couldn't release load detect pipe: %i\n", ret);
12020         drm_atomic_state_put(state);
12021 }
12022
12023 static int i9xx_pll_refclk(struct drm_device *dev,
12024                            const struct intel_crtc_state *pipe_config)
12025 {
12026         struct drm_i915_private *dev_priv = to_i915(dev);
12027         u32 dpll = pipe_config->dpll_hw_state.dpll;
12028
12029         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
12030                 return dev_priv->vbt.lvds_ssc_freq;
12031         else if (HAS_PCH_SPLIT(dev_priv))
12032                 return 120000;
12033         else if (!IS_GEN(dev_priv, 2))
12034                 return 96000;
12035         else
12036                 return 48000;
12037 }
12038
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick FP0 or FP1 depending on which divisor set is selected. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Decode m1/n/m2 from the FP register (PNV has its own layout). */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN(dev_priv, 2)) {
		/* p1 is stored one-hot; ffs() recovers the divisor value. */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* p2 depends on the DPLL operating mode (DAC vs LVDS). */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			drm_dbg_kms(&dev_priv->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2: i830 has no LVDS register at all. */
		u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
								 LVDS);
		/* LVDS is only possible on the second pipe on gen2. */
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
12130
int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the link clock is simpler:
	 * link_clock = (m * link_clock) / n
	 */

	/* m/n values not programmed yet; avoid a division by zero. */
	if (!m_n->link_n)
		return 0;

	/* 64-bit multiply so link_m * link_freq can't overflow 32 bits. */
	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
}
12149
/*
 * Read out the port clock and derive a dotclock for a PCH-attached pipe
 * from the currently programmed DPLL and FDI m/n configuration.
 */
static void ilk_pch_clock_get(struct intel_crtc *crtc,
			      struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}
12167
/*
 * Reset @crtc_state to default "unset" values and (re)attach it to
 * @crtc. Everything is zeroed first; fields whose invalid/unset value
 * is not all-zeroes are then set explicitly.
 */
static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
				   struct intel_crtc *crtc)
{
	memset(crtc_state, 0, sizeof(*crtc_state));

	/* Reinitialize the drm core (uapi) part of the state. */
	__drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);

	/* These fields use non-zero sentinels for "not set". */
	crtc_state->cpu_transcoder = INVALID_TRANSCODER;
	crtc_state->master_transcoder = INVALID_TRANSCODER;
	crtc_state->hsw_workaround_pipe = INVALID_PIPE;
	crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
	crtc_state->scaler_state.scaler_id = -1;
	crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
}
12182
12183 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
12184 {
12185         struct intel_crtc_state *crtc_state;
12186
12187         crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
12188
12189         if (crtc_state)
12190                 intel_crtc_state_reset(crtc_state, crtc);
12191
12192         return crtc_state;
12193 }
12194
12195 /* Returns the currently programmed mode of the given encoder. */
12196 struct drm_display_mode *
12197 intel_encoder_current_mode(struct intel_encoder *encoder)
12198 {
12199         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
12200         struct intel_crtc_state *crtc_state;
12201         struct drm_display_mode *mode;
12202         struct intel_crtc *crtc;
12203         enum pipe pipe;
12204
12205         if (!encoder->get_hw_state(encoder, &pipe))
12206                 return NULL;
12207
12208         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12209
12210         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
12211         if (!mode)
12212                 return NULL;
12213
12214         crtc_state = intel_crtc_state_alloc(crtc);
12215         if (!crtc_state) {
12216                 kfree(mode);
12217                 return NULL;
12218         }
12219
12220         if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
12221                 kfree(crtc_state);
12222                 kfree(mode);
12223                 return NULL;
12224         }
12225
12226         encoder->get_config(encoder, crtc_state);
12227
12228         intel_mode_from_pipe_config(mode, crtc_state);
12229
12230         kfree(crtc_state);
12231
12232         return mode;
12233 }
12234
/* Tear down the drm core side of the crtc and free its memory. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(to_intel_crtc(crtc));
}
12242
12243 /**
12244  * intel_wm_need_update - Check whether watermarks need updating
12245  * @cur: current plane state
12246  * @new: new plane state
12247  *
12248  * Check current plane state versus the new one to determine whether
12249  * watermarks need to be recalculated.
12250  *
12251  * Returns true or false.
12252  */
12253 static bool intel_wm_need_update(const struct intel_plane_state *cur,
12254                                  struct intel_plane_state *new)
12255 {
12256         /* Update watermarks on tiling or size changes. */
12257         if (new->uapi.visible != cur->uapi.visible)
12258                 return true;
12259
12260         if (!cur->hw.fb || !new->hw.fb)
12261                 return false;
12262
12263         if (cur->hw.fb->modifier != new->hw.fb->modifier ||
12264             cur->hw.rotation != new->hw.rotation ||
12265             drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
12266             drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
12267             drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
12268             drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
12269                 return true;
12270
12271         return false;
12272 }
12273
12274 static bool needs_scaling(const struct intel_plane_state *state)
12275 {
12276         int src_w = drm_rect_width(&state->uapi.src) >> 16;
12277         int src_h = drm_rect_height(&state->uapi.src) >> 16;
12278         int dst_w = drm_rect_width(&state->uapi.dst);
12279         int dst_h = drm_rect_height(&state->uapi.dst);
12280
12281         return (src_w != dst_w || src_h != dst_h);
12282 }
12283
/**
 * intel_plane_atomic_calc_changes - compute derived crtc state for a plane update
 * @old_crtc_state: crtc state before this commit
 * @crtc_state: new crtc state, flags updated in place
 * @old_plane_state: plane state before this commit
 * @plane_state: new plane state
 *
 * Works out the plane's visibility transition (turn on / turn off /
 * stays visible) and sets the watermark, cxsr and frontbuffer tracking
 * flags in @crtc_state accordingly.
 *
 * Returns: 0 on success, negative error code on failure.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	/* Non-cursor planes on gen9+ may need a pipe scaler assigned. */
	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = plane_state->uapi.visible;

	/* A plane can't have been visible on an inactive crtc. */
	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		plane_state->uapi.visible = visible = false;
		crtc_state->active_planes &= ~BIT(plane->id);
		crtc_state->data_rate[plane->id] = 0;
		crtc_state->min_cdclk[plane->id] = 0;
	}

	/* Plane was and stays invisible: nothing more to derive. */
	if (!was_visible && !visible)
		return 0;

	/* A full modeset counts as both an off and an on transition. */
	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	drm_dbg_atomic(&dev_priv->drm,
		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
		       crtc->base.base.id, crtc->base.name,
		       plane->base.base.id, plane->base.name,
		       was_visible, visible,
		       turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			crtc_state->update_wm_pre = true;
			crtc_state->update_wm_post = true;
		}
	}

	/* Record the plane for frontbuffer tracking flushes. */
	if (visible || was_visible)
		crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(plane_state))))
		crtc_state->disable_lp_wm = true;

	return 0;
}
12407
12408 static bool encoders_cloneable(const struct intel_encoder *a,
12409                                const struct intel_encoder *b)
12410 {
12411         /* masks could be asymmetric, so check both ways */
12412         return a == b || (a->cloneable & (1 << b->type) &&
12413                           b->cloneable & (1 << a->type));
12414 }
12415
12416 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
12417                                          struct intel_crtc *crtc,
12418                                          struct intel_encoder *encoder)
12419 {
12420         struct intel_encoder *source_encoder;
12421         struct drm_connector *connector;
12422         struct drm_connector_state *connector_state;
12423         int i;
12424
12425         for_each_new_connector_in_state(state, connector, connector_state, i) {
12426                 if (connector_state->crtc != &crtc->base)
12427                         continue;
12428
12429                 source_encoder =
12430                         to_intel_encoder(connector_state->best_encoder);
12431                 if (!encoders_cloneable(encoder, source_encoder))
12432                         return false;
12433         }
12434
12435         return true;
12436 }
12437
12438 static int icl_add_linked_planes(struct intel_atomic_state *state)
12439 {
12440         struct intel_plane *plane, *linked;
12441         struct intel_plane_state *plane_state, *linked_plane_state;
12442         int i;
12443
12444         for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
12445                 linked = plane_state->planar_linked_plane;
12446
12447                 if (!linked)
12448                         continue;
12449
12450                 linked_plane_state = intel_atomic_get_plane_state(state, linked);
12451                 if (IS_ERR(linked_plane_state))
12452                         return PTR_ERR(linked_plane_state);
12453
12454                 WARN_ON(linked_plane_state->planar_linked_plane != plane);
12455                 WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
12456         }
12457
12458         return 0;
12459 }
12460
/*
 * On gen11+ a plane scanning out a planar YUV format (NV12 etc.) needs
 * a second hardware plane to fetch the Y component. Tear down any stale
 * plane links in this crtc state, then pair each NV12 plane with a free
 * Y-capable plane and copy the relevant parameters to that slave plane.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->planar_slave = false;
	}

	/* No planar YUV planes on this crtc: nothing to pair up. */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find a free (inactive) plane that can serve as the Y plane. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			drm_dbg_kms(&dev_priv->drm,
				    "Need %d free Y planes for planar YUV\n",
				    hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
			    linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;
		memcpy(linked_state->color_plane, plane_state->color_plane,
		       sizeof(linked_state->color_plane));

		intel_plane_copy_uapi_to_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		/* HDR planes select the linked Y plane via the CUS control word. */
		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}
12554
12555 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
12556 {
12557         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
12558         struct intel_atomic_state *state =
12559                 to_intel_atomic_state(new_crtc_state->uapi.state);
12560         const struct intel_crtc_state *old_crtc_state =
12561                 intel_atomic_get_old_crtc_state(state, crtc);
12562
12563         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
12564 }
12565
12566 static bool
12567 intel_atomic_is_master_connector(struct intel_crtc_state *crtc_state)
12568 {
12569         struct drm_crtc *crtc = crtc_state->uapi.crtc;
12570         struct drm_atomic_state *state = crtc_state->uapi.state;
12571         struct drm_connector *connector;
12572         struct drm_connector_state *connector_state;
12573         int i;
12574
12575         for_each_new_connector_in_state(state, connector, connector_state, i) {
12576                 if (connector_state->crtc != crtc)
12577                         continue;
12578                 if (connector->has_tile &&
12579                     connector->tile_h_loc == connector->num_h_tile - 1 &&
12580                     connector->tile_v_loc == connector->num_v_tile - 1)
12581                         return true;
12582         }
12583
12584         return false;
12585 }
12586
/* Clear the port sync (tiled display) master/slave linkage. */
static void reset_port_sync_mode_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->master_transcoder = INVALID_TRANSCODER;
	crtc_state->sync_mode_slaves_mask = 0;
}
12592
/*
 * Set up the port sync (genlock) master/slave linkage for tiled DP
 * displays on gen11+: the crtc driving the last h/v tile is the master,
 * every other tile's crtc becomes a slave pointing at it. Clears the
 * linkage instead when the display isn't a fully-present tile group.
 */
static int icl_compute_port_sync_crtc_state(struct drm_connector *connector,
					    struct intel_crtc_state *crtc_state,
					    int num_tiled_conns)
{
	struct drm_crtc *crtc = crtc_state->uapi.crtc;
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_connector *master_connector;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc *master_crtc = NULL;
	struct drm_crtc_state *master_crtc_state;
	struct intel_crtc_state *master_pipe_config;

	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
		return 0;

	/*
	 * In case of tiled displays there could be one or more slaves but there is
	 * only one master. Lets make the CRTC used by the connector corresponding
	 * to the last horizontal and last vertical tile a master/genlock CRTC.
	 * All the other CRTCs corresponding to other tiles of the same Tile group
	 * are the slave CRTCs and hold a pointer to their genlock CRTC.
	 * If all tiles not present do not make master slave assignments.
	 */
	if (!connector->has_tile ||
	    crtc_state->hw.mode.hdisplay != connector->tile_h_size ||
	    crtc_state->hw.mode.vdisplay != connector->tile_v_size ||
	    num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
		reset_port_sync_mode_state(crtc_state);
		return 0;
	}
	/* Last Horizontal and last vertical tile connector is a master
	 * Master's crtc state is already populated in slave for port sync
	 */
	if (connector->tile_h_loc == connector->num_h_tile - 1 &&
	    connector->tile_v_loc == connector->num_v_tile - 1)
		return 0;

	/* Loop through all connectors and configure the Slave crtc_state
	 * to point to the correct master.
	 */
	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(master_connector, &conn_iter) {
		struct drm_connector_state *master_conn_state = NULL;

		/* Only consider tiles belonging to the same tile group. */
		if (!(master_connector->has_tile &&
		      master_connector->tile_group->id == connector->tile_group->id))
			continue;
		/* Skip everything but the last h/v tile (the master). */
		if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
		    master_connector->tile_v_loc != master_connector->num_v_tile - 1)
			continue;

		master_conn_state = drm_atomic_get_connector_state(&state->base,
								   master_connector);
		if (IS_ERR(master_conn_state)) {
			drm_connector_list_iter_end(&conn_iter);
			return PTR_ERR(master_conn_state);
		}
		if (master_conn_state->crtc) {
			master_crtc = master_conn_state->crtc;
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	if (!master_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not find Master CRTC for Slave CRTC %d\n",
			    crtc->base.id);
		return -EINVAL;
	}

	master_crtc_state = drm_atomic_get_crtc_state(&state->base,
						      master_crtc);
	if (IS_ERR(master_crtc_state))
		return PTR_ERR(master_crtc_state);

	master_pipe_config = to_intel_crtc_state(master_crtc_state);
	crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
	master_pipe_config->sync_mode_slaves_mask |=
		BIT(crtc_state->cpu_transcoder);
	drm_dbg_kms(&dev_priv->drm,
		    "Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
		    transcoder_name(crtc_state->master_transcoder),
		    crtc->base.id,
		    master_pipe_config->sync_mode_slaves_mask);

	return 0;
}
12685
12686 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
12687 {
12688         const struct drm_display_mode *adjusted_mode =
12689                 &crtc_state->hw.adjusted_mode;
12690
12691         if (!crtc_state->hw.enable)
12692                 return 0;
12693
12694         return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
12695                                  adjusted_mode->crtc_clock);
12696 }
12697
12698 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
12699                                const struct intel_cdclk_state *cdclk_state)
12700 {
12701         const struct drm_display_mode *adjusted_mode =
12702                 &crtc_state->hw.adjusted_mode;
12703
12704         if (!crtc_state->hw.enable)
12705                 return 0;
12706
12707         return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
12708                                  cdclk_state->logical.cdclk);
12709 }
12710
12711 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
12712 {
12713         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12714         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12715         const struct drm_display_mode *adjusted_mode =
12716                 &crtc_state->hw.adjusted_mode;
12717         u16 linetime_wm;
12718
12719         if (!crtc_state->hw.enable)
12720                 return 0;
12721
12722         linetime_wm = DIV_ROUND_UP(adjusted_mode->crtc_htotal * 1000 * 8,
12723                                    crtc_state->pixel_rate);
12724
12725         /* Display WA #1135: BXT:ALL GLK:ALL */
12726         if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
12727                 linetime_wm /= 2;
12728
12729         return linetime_wm;
12730 }
12731
12732 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
12733                                    struct intel_crtc *crtc)
12734 {
12735         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12736         struct intel_crtc_state *crtc_state =
12737                 intel_atomic_get_new_crtc_state(state, crtc);
12738         const struct intel_cdclk_state *cdclk_state;
12739
12740         if (INTEL_GEN(dev_priv) >= 9)
12741                 crtc_state->linetime = skl_linetime_wm(crtc_state);
12742         else
12743                 crtc_state->linetime = hsw_linetime_wm(crtc_state);
12744
12745         if (!hsw_crtc_supports_ips(crtc))
12746                 return 0;
12747
12748         cdclk_state = intel_atomic_get_cdclk_state(state);
12749         if (IS_ERR(cdclk_state))
12750                 return PTR_ERR(cdclk_state);
12751
12752         crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
12753                                                        cdclk_state);
12754
12755         return 0;
12756 }
12757
12758 static int intel_crtc_atomic_check(struct intel_atomic_state *state,
12759                                    struct intel_crtc *crtc)
12760 {
12761         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12762         struct intel_crtc_state *crtc_state =
12763                 intel_atomic_get_new_crtc_state(state, crtc);
12764         bool mode_changed = needs_modeset(crtc_state);
12765         int ret;
12766
12767         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
12768             mode_changed && !crtc_state->hw.active)
12769                 crtc_state->update_wm_post = true;
12770
12771         if (mode_changed && crtc_state->hw.enable &&
12772             dev_priv->display.crtc_compute_clock &&
12773             !WARN_ON(crtc_state->shared_dpll)) {
12774                 ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
12775                 if (ret)
12776                         return ret;
12777         }
12778
12779         /*
12780          * May need to update pipe gamma enable bits
12781          * when C8 planes are getting enabled/disabled.
12782          */
12783         if (c8_planes_changed(crtc_state))
12784                 crtc_state->uapi.color_mgmt_changed = true;
12785
12786         if (mode_changed || crtc_state->update_pipe ||
12787             crtc_state->uapi.color_mgmt_changed) {
12788                 ret = intel_color_check(crtc_state);
12789                 if (ret)
12790                         return ret;
12791         }
12792
12793         if (dev_priv->display.compute_pipe_wm) {
12794                 ret = dev_priv->display.compute_pipe_wm(crtc_state);
12795                 if (ret) {
12796                         drm_dbg_kms(&dev_priv->drm,
12797                                     "Target pipe watermarks are invalid\n");
12798                         return ret;
12799                 }
12800         }
12801
12802         if (dev_priv->display.compute_intermediate_wm) {
12803                 if (WARN_ON(!dev_priv->display.compute_pipe_wm))
12804                         return 0;
12805
12806                 /*
12807                  * Calculate 'intermediate' watermarks that satisfy both the
12808                  * old state and the new state.  We can program these
12809                  * immediately.
12810                  */
12811                 ret = dev_priv->display.compute_intermediate_wm(crtc_state);
12812                 if (ret) {
12813                         drm_dbg_kms(&dev_priv->drm,
12814                                     "No valid intermediate pipe watermarks are possible\n");
12815                         return ret;
12816                 }
12817         }
12818
12819         if (INTEL_GEN(dev_priv) >= 9) {
12820                 if (mode_changed || crtc_state->update_pipe)
12821                         ret = skl_update_scaler_crtc(crtc_state);
12822                 if (!ret)
12823                         ret = intel_atomic_setup_scalers(dev_priv, crtc,
12824                                                          crtc_state);
12825                 if (ret)
12826                         return ret;
12827         }
12828
12829         if (HAS_IPS(dev_priv)) {
12830                 ret = hsw_compute_ips_config(crtc_state);
12831                 if (ret)
12832                         return ret;
12833         }
12834
12835         if (INTEL_GEN(dev_priv) >= 9 ||
12836             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
12837                 ret = hsw_compute_linetime_wm(state, crtc);
12838                 if (ret)
12839                         return ret;
12840
12841         }
12842
12843         return 0;
12844 }
12845
/*
 * Sync each connector's atomic state (best_encoder/crtc) with the current
 * legacy encoder linkage. Used when taking over state from the hardware /
 * non-atomic paths so that subsequent atomic commits see a consistent view.
 *
 * Reference counting: a connector holds a reference while it is linked to
 * a crtc, so drop the old reference (if any) before re-linking and take a
 * new one when the connector ends up bound to an encoder again.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Drop the reference held for the previous crtc link. */
		if (connector->base.state->crtc)
			drm_connector_put(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* Linked again: take a fresh reference. */
			drm_connector_get(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
12870
12871 static int
12872 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
12873                       struct intel_crtc_state *pipe_config)
12874 {
12875         struct drm_connector *connector = conn_state->connector;
12876         struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
12877         const struct drm_display_info *info = &connector->display_info;
12878         int bpp;
12879
12880         switch (conn_state->max_bpc) {
12881         case 6 ... 7:
12882                 bpp = 6 * 3;
12883                 break;
12884         case 8 ... 9:
12885                 bpp = 8 * 3;
12886                 break;
12887         case 10 ... 11:
12888                 bpp = 10 * 3;
12889                 break;
12890         case 12:
12891                 bpp = 12 * 3;
12892                 break;
12893         default:
12894                 return -EINVAL;
12895         }
12896
12897         if (bpp < pipe_config->pipe_bpp) {
12898                 drm_dbg_kms(&i915->drm,
12899                             "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
12900                             "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
12901                             connector->base.id, connector->name,
12902                             bpp, 3 * info->bpc,
12903                             3 * conn_state->max_requested_bpc,
12904                             pipe_config->pipe_bpp);
12905
12906                 pipe_config->pipe_bpp = bpp;
12907         }
12908
12909         return 0;
12910 }
12911
12912 static int
12913 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12914                           struct intel_crtc_state *pipe_config)
12915 {
12916         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12917         struct drm_atomic_state *state = pipe_config->uapi.state;
12918         struct drm_connector *connector;
12919         struct drm_connector_state *connector_state;
12920         int bpp, i;
12921
12922         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
12923             IS_CHERRYVIEW(dev_priv)))
12924                 bpp = 10*3;
12925         else if (INTEL_GEN(dev_priv) >= 5)
12926                 bpp = 12*3;
12927         else
12928                 bpp = 8*3;
12929
12930         pipe_config->pipe_bpp = bpp;
12931
12932         /* Clamp display bpp to connector max bpp */
12933         for_each_new_connector_in_state(state, connector, connector_state, i) {
12934                 int ret;
12935
12936                 if (connector_state->crtc != &crtc->base)
12937                         continue;
12938
12939                 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
12940                 if (ret)
12941                         return ret;
12942         }
12943
12944         return 0;
12945 }
12946
/* Log the fully computed (crtc_*) timings of @mode for debugging. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal,
		      mode->type, mode->flags);
}
12958
/*
 * Log one set of link M/N values (FDI or DP) for debugging; @id names
 * which set is being dumped (e.g. "fdi", "dp m_n", "dp m2_n2").
 */
static inline void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);

	drm_dbg_kms(&i915->drm,
		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		    id, lane_count,
		    m_n->gmch_m, m_n->gmch_n,
		    m_n->link_m, m_n->link_n, m_n->tu);
}
12972
/*
 * Dump an HDMI infoframe to the kernel log; skipped entirely when KMS
 * debugging output is disabled to avoid the formatting cost.
 */
static void
intel_dump_infoframe(struct drm_i915_private *dev_priv,
		     const union hdmi_infoframe *frame)
{
	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
}
12982
/* Map each INTEL_OUTPUT_* enum value to its name for debug output. */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
13001
13002 static void snprintf_output_types(char *buf, size_t len,
13003                                   unsigned int output_types)
13004 {
13005         char *str = buf;
13006         int i;
13007
13008         str[0] = '\0';
13009
13010         for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
13011                 int r;
13012
13013                 if ((output_types & BIT(i)) == 0)
13014                         continue;
13015
13016                 r = snprintf(str, len, "%s%s",
13017                              str != buf ? "," : "", output_type_str[i]);
13018                 if (r >= len)
13019                         break;
13020                 str += r;
13021                 len -= r;
13022
13023                 output_types &= ~BIT(i);
13024         }
13025
13026         WARN_ON_ONCE(output_types != 0);
13027 }
13028
/* Human-readable names for enum intel_output_format, indexed by value. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
13035
13036 static const char *output_formats(enum intel_output_format format)
13037 {
13038         if (format >= ARRAY_SIZE(output_format_str))
13039                 format = INTEL_OUTPUT_FORMAT_INVALID;
13040         return output_format_str[format];
13041 }
13042
/* Log one plane's framebuffer, visibility, rotation and scaler state. */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_format_name_buf format_name;

	/* A plane without a framebuffer has nothing further to dump. */
	if (!fb) {
		drm_dbg_kms(&i915->drm,
			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			    plane->base.base.id, plane->base.name,
			    yesno(plane_state->uapi.visible));
		return;
	}

	drm_dbg_kms(&i915->drm,
		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
		    plane->base.base.id, plane->base.name,
		    fb->base.id, fb->width, fb->height,
		    drm_get_format_name(fb->format->format, &format_name),
		    yesno(plane_state->uapi.visible));
	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
		    plane_state->hw.rotation, plane_state->scaler_id);
	/* src is 16.16 fixed point, dst is integer pixels. */
	if (plane_state->uapi.visible)
		drm_dbg_kms(&i915->drm,
			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
			    DRM_RECT_ARG(&plane_state->uapi.dst));
}
13072
/*
 * Dump a crtc state to the kernel log for debugging. @context describes
 * why the dump happens. @state may be NULL, in which case the plane
 * states are not dumped; otherwise every plane on this crtc's pipe that
 * is part of @state is dumped as well.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
		    crtc->base.base.id, crtc->base.name,
		    yesno(pipe_config->hw.enable), context);

	/* For a disabled crtc only the plane states are of interest. */
	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	drm_dbg_kms(&dev_priv->drm,
		    "active: %s, output_types: %s (0x%x), output format: %s\n",
		    yesno(pipe_config->hw.active),
		    buf, pipe_config->output_types,
		    output_formats(pipe_config->output_format));

	drm_dbg_kms(&dev_priv->drm,
		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		    transcoder_name(pipe_config->cpu_transcoder),
		    pipe_config->pipe_bpp, pipe_config->dither);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* m2_n2 holds the reduced refresh rate link config (DRRS). */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		    pipe_config->has_audio, pipe_config->has_infoframe,
		    pipe_config->infoframes.enable);

	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
			    pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);

	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
	drm_dbg_kms(&dev_priv->drm,
		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		    pipe_config->port_clock,
		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		    pipe_config->pixel_rate);

	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
		    pipe_config->linetime, pipe_config->ips_linetime);

	if (INTEL_GEN(dev_priv) >= 9)
		drm_dbg_kms(&dev_priv->drm,
			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			    crtc->num_scalers,
			    pipe_config->scaler_state.scaler_users,
			    pipe_config->scaler_state.scaler_id);

	/* GMCH platforms have a panel fitter per pipe; others use the PCH one. */
	if (HAS_GMCH(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			    pipe_config->gmch_pfit.control,
			    pipe_config->gmch_pfit.pgm_ratios,
			    pipe_config->gmch_pfit.lvds_border_bits);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
			    pipe_config->pch_pfit.pos,
			    pipe_config->pch_pfit.size,
			    enableddisabled(pipe_config->pch_pfit.enabled),
			    yesno(pipe_config->pch_pfit.force_thru));

	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
		    pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	/* CHV has a CGM unit instead of the usual pipe CSC. */
	if (IS_CHERRYVIEW(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->cgm_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->csc_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);

	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
		    transcoder_name(pipe_config->mst_master_transcoder));

dump_planes:
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
13199
13200 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
13201 {
13202         struct drm_device *dev = state->base.dev;
13203         struct drm_connector *connector;
13204         struct drm_connector_list_iter conn_iter;
13205         unsigned int used_ports = 0;
13206         unsigned int used_mst_ports = 0;
13207         bool ret = true;
13208
13209         /*
13210          * We're going to peek into connector->state,
13211          * hence connection_mutex must be held.
13212          */
13213         drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
13214
13215         /*
13216          * Walk the connector list instead of the encoder
13217          * list to detect the problem on ddi platforms
13218          * where there's just one encoder per digital port.
13219          */
13220         drm_connector_list_iter_begin(dev, &conn_iter);
13221         drm_for_each_connector_iter(connector, &conn_iter) {
13222                 struct drm_connector_state *connector_state;
13223                 struct intel_encoder *encoder;
13224
13225                 connector_state =
13226                         drm_atomic_get_new_connector_state(&state->base,
13227                                                            connector);
13228                 if (!connector_state)
13229                         connector_state = connector->state;
13230
13231                 if (!connector_state->best_encoder)
13232                         continue;
13233
13234                 encoder = to_intel_encoder(connector_state->best_encoder);
13235
13236                 WARN_ON(!connector_state->crtc);
13237
13238                 switch (encoder->type) {
13239                         unsigned int port_mask;
13240                 case INTEL_OUTPUT_DDI:
13241                         if (WARN_ON(!HAS_DDI(to_i915(dev))))
13242                                 break;
13243                         /* else, fall through */
13244                 case INTEL_OUTPUT_DP:
13245                 case INTEL_OUTPUT_HDMI:
13246                 case INTEL_OUTPUT_EDP:
13247                         port_mask = 1 << encoder->port;
13248
13249                         /* the same port mustn't appear more than once */
13250                         if (used_ports & port_mask)
13251                                 ret = false;
13252
13253                         used_ports |= port_mask;
13254                         break;
13255                 case INTEL_OUTPUT_DP_MST:
13256                         used_mst_ports |=
13257                                 1 << encoder->port;
13258                         break;
13259                 default:
13260                         break;
13261                 }
13262         }
13263         drm_connector_list_iter_end(&conn_iter);
13264
13265         /* can't mix MST and SST/HDMI on the same port */
13266         if (used_ports & used_mst_ports)
13267                 return false;
13268
13269         return ret;
13270 }
13271
/*
 * Copy the parts of the uapi state into the hw state that may change
 * even without a full modeset (currently only the color blobs).
 */
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state)
{
	intel_crtc_copy_color_blobs(crtc_state);
}
13277
/*
 * Seed the hw (actual programming) state from the uapi (userspace
 * visible) state at the start of a modeset computation.
 */
static void
intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	crtc_state->hw.mode = crtc_state->uapi.mode;
	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
	intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state);
}
13287
/*
 * Propagate the hw state back into the uapi state so that userspace
 * sees what was actually programmed (used e.g. after state readout).
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	/* Re-sets the mode blob; only fails on allocation error. */
	WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}
13304
/*
 * Reset @crtc_state to a mostly-zeroed state before a full modeset
 * recomputation, while carefully preserving the fields that must
 * survive (uapi state, dpll/scaler state, wm on pre-ilk, etc.).
 * Returns 0 on success or -ENOMEM.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are known to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* Pre-ilk platforms compute watermarks outside the atomic state. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;
	/*
	 * Save the slave bitmask which gets filled for master crtc state during
	 * slave atomic check call. For all other CRTCs reset the port sync variables
	 * crtc_state->master_transcoder needs to be set to INVALID
	 */
	reset_port_sync_mode_state(saved_state);
	if (intel_atomic_is_master_connector(crtc_state))
		saved_state->sync_mode_slaves_mask =
			crtc_state->sync_mode_slaves_mask;

	/* Overwrite in place so pointers to crtc_state stay valid. */
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(crtc_state);

	return 0;
}
13351
/*
 * Compute the full pipe configuration for a modeset: baseline bpp,
 * pipe dimensions, then let every encoder on this crtc adjust the
 * config, retrying once if the crtc fixup asks for it (RETRY).
 * Returns 0 on success or a negative error code (-EDEADLK must be
 * propagated unchanged for the atomic backoff logic).
 */
static int
intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret;
	int i, tile_group_id = -1, num_tiled_conns = 0;
	bool retry = true;

	/* Default transcoder: the one with the same index as the pipe. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			drm_dbg_kms(&i915->drm,
				    "rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Get tile_group_id of tiled connector */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc == crtc &&
		    connector->has_tile) {
			tile_group_id = connector->tile_group->id;
			break;
		}
	}

	/* Get total number of tiled connectors in state that belong to
	 * this tile group.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector->has_tile &&
		    connector->tile_group->id == tile_group_id)
			num_tiled_conns++;
	}

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		ret = icl_compute_port_sync_crtc_state(connector, pipe_config,
						       num_tiled_conns);
		if (ret) {
			drm_dbg_kms(&i915->drm,
				    "Cannot assign Sync Mode CRTCs: %d\n",
				    ret);
			return ret;
		}

		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			if (ret != -EDEADLK)
				drm_dbg_kms(&i915->drm,
					    "Encoder config failure: %d\n",
					    ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
		return ret;
	}

	/* A positive RETRY means re-run the encoder loop exactly once. */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n"))
			return -EINVAL;

		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	drm_dbg_kms(&i915->drm,
		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	/*
	 * Make drm_calc_timestamping_constants in
	 * drm_atomic_helper_update_legacy_modeset_state() happy
	 */
	pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;

	return 0;
}
13523
/*
 * Compare two clocks with a small tolerance: accept the pair when the
 * difference is below ~5% of the sum of the two clocks (roughly 10% of
 * their average). A zero clock only ever matches another zero clock.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);

	return (delta + clock1 + clock2) * 100 / (clock1 + clock2) < 105;
}
13541
13542 static bool
13543 intel_compare_m_n(unsigned int m, unsigned int n,
13544                   unsigned int m2, unsigned int n2,
13545                   bool exact)
13546 {
13547         if (m == m2 && n == n2)
13548                 return true;
13549
13550         if (exact || !m || !n || !m2 || !n2)
13551                 return false;
13552
13553         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
13554
13555         if (n > n2) {
13556                 while (n > n2) {
13557                         m2 <<= 1;
13558                         n2 <<= 1;
13559                 }
13560         } else if (n < n2) {
13561                 while (n < n2) {
13562                         m <<= 1;
13563                         n <<= 1;
13564                 }
13565         }
13566
13567         if (n != n2)
13568                 return false;
13569
13570         return intel_fuzzy_clock_check(m, m2);
13571 }
13572
13573 static bool
13574 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
13575                        const struct intel_link_m_n *m2_n2,
13576                        bool exact)
13577 {
13578         return m_n->tu == m2_n2->tu &&
13579                 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
13580                                   m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
13581                 intel_compare_m_n(m_n->link_m, m_n->link_n,
13582                                   m2_n2->link_m, m2_n2->link_n, exact);
13583 }
13584
13585 static bool
13586 intel_compare_infoframe(const union hdmi_infoframe *a,
13587                         const union hdmi_infoframe *b)
13588 {
13589         return memcmp(a, b, sizeof(*a)) == 0;
13590 }
13591
13592 static void
13593 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
13594                                bool fastset, const char *name,
13595                                const union hdmi_infoframe *a,
13596                                const union hdmi_infoframe *b)
13597 {
13598         if (fastset) {
13599                 if (!drm_debug_enabled(DRM_UT_KMS))
13600                         return;
13601
13602                 drm_dbg_kms(&dev_priv->drm,
13603                             "fastset mismatch in %s infoframe\n", name);
13604                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
13605                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
13606                 drm_dbg_kms(&dev_priv->drm, "found:\n");
13607                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
13608         } else {
13609                 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
13610                 drm_err(&dev_priv->drm, "expected:\n");
13611                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
13612                 drm_err(&dev_priv->drm, "found:\n");
13613                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
13614         }
13615 }
13616
13617 static void __printf(4, 5)
13618 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
13619                      const char *name, const char *format, ...)
13620 {
13621         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
13622         struct va_format vaf;
13623         va_list args;
13624
13625         va_start(args, format);
13626         vaf.fmt = format;
13627         vaf.va = &args;
13628
13629         if (fastset)
13630                 drm_dbg_kms(&i915->drm,
13631                             "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
13632                             crtc->base.base.id, crtc->base.name, name, &vaf);
13633         else
13634                 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
13635                         crtc->base.base.id, crtc->base.name, name, &vaf);
13636
13637         va_end(args);
13638 }
13639
13640 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
13641 {
13642         if (i915_modparams.fastboot != -1)
13643                 return i915_modparams.fastboot;
13644
13645         /* Enable fastboot by default on Skylake and newer */
13646         if (INTEL_GEN(dev_priv) >= 9)
13647                 return true;
13648
13649         /* Enable fastboot by default on VLV and CHV */
13650         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13651                 return true;
13652
13653         /* Disabled by default on all others */
13654         return false;
13655 }
13656
/*
 * Compare two CRTC states field by field and report whether they match.
 *
 * @current_config: the committed sw state (or hw readout) to compare against
 * @pipe_config: the newly computed state
 * @fastset: when true, mismatches are logged at debug level only and some
 *           comparisons are fuzzier (M/N ratios, inherited boot state);
 *           when false any mismatch is reported as an error
 *
 * Returns true when every checked field agrees; on mismatch, logging is
 * done via pipe_config_mismatch() and false is returned (all checks still
 * run so every mismatch gets logged, not just the first).
 */
static bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
                          const struct intel_crtc_state *pipe_config,
                          bool fastset)
{
        struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
        bool ret = true;
        u32 bp_gamma = 0;
        /*
         * True only during a fastset where the current state was inherited
         * from the BIOS/boot firmware and the new state was not.
         */
        bool fixup_inherited = fastset &&
                (current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
                !(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);

        if (fixup_inherited && !fastboot_enabled(dev_priv)) {
                drm_dbg_kms(&dev_priv->drm,
                            "initial modeset and fastboot not set\n");
                ret = false;
        }

/*
 * The PIPE_CONF_CHECK_* helpers below all compare one field of the two
 * states, log via pipe_config_mismatch() on disagreement, and clear @ret.
 */

/* Compare a field, printing it in hex on mismatch. */
#define PIPE_CONF_CHECK_X(name) do { \
        if (current_config->name != pipe_config->name) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected 0x%08x, found 0x%08x)", \
                                     current_config->name, \
                                     pipe_config->name); \
                ret = false; \
        } \
} while (0)

/* Compare a field, printing it in decimal on mismatch. */
#define PIPE_CONF_CHECK_I(name) do { \
        if (current_config->name != pipe_config->name) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected %i, found %i)", \
                                     current_config->name, \
                                     pipe_config->name); \
                ret = false; \
        } \
} while (0)

/* Compare a boolean field. */
#define PIPE_CONF_CHECK_BOOL(name) do { \
        if (current_config->name != pipe_config->name) { \
                pipe_config_mismatch(fastset, crtc,  __stringify(name), \
                                     "(expected %s, found %s)", \
                                     yesno(current_config->name), \
                                     yesno(pipe_config->name)); \
                ret = false; \
        } \
} while (0)

/*
 * Checks state where we only read out the enabling, but not the entire
 * state itself (like full infoframes or ELD for audio). These states
 * require a full modeset on bootup to fix up.
 */
#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
        if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
                PIPE_CONF_CHECK_BOOL(name); \
        } else { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
                                     yesno(current_config->name), \
                                     yesno(pipe_config->name)); \
                ret = false; \
        } \
} while (0)

/* Compare a pointer field (e.g. the shared DPLL) by identity. */
#define PIPE_CONF_CHECK_P(name) do { \
        if (current_config->name != pipe_config->name) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected %p, found %p)", \
                                     current_config->name, \
                                     pipe_config->name); \
                ret = false; \
        } \
} while (0)

/* Compare a link M/N config; exact match required except during fastset. */
#define PIPE_CONF_CHECK_M_N(name) do { \
        if (!intel_compare_link_m_n(&current_config->name, \
                                    &pipe_config->name,\
                                    !fastset)) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected tu %i gmch %i/%i link %i/%i, " \
                                     "found tu %i, gmch %i/%i link %i/%i)", \
                                     current_config->name.tu, \
                                     current_config->name.gmch_m, \
                                     current_config->name.gmch_n, \
                                     current_config->name.link_m, \
                                     current_config->name.link_n, \
                                     pipe_config->name.tu, \
                                     pipe_config->name.gmch_m, \
                                     pipe_config->name.gmch_n, \
                                     pipe_config->name.link_m, \
                                     pipe_config->name.link_n); \
                ret = false; \
        } \
} while (0)

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
        if (!intel_compare_link_m_n(&current_config->name, \
                                    &pipe_config->name, !fastset) && \
            !intel_compare_link_m_n(&current_config->alt_name, \
                                    &pipe_config->name, !fastset)) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected tu %i gmch %i/%i link %i/%i, " \
                                     "or tu %i gmch %i/%i link %i/%i, " \
                                     "found tu %i, gmch %i/%i link %i/%i)", \
                                     current_config->name.tu, \
                                     current_config->name.gmch_m, \
                                     current_config->name.gmch_n, \
                                     current_config->name.link_m, \
                                     current_config->name.link_n, \
                                     current_config->alt_name.tu, \
                                     current_config->alt_name.gmch_m, \
                                     current_config->alt_name.gmch_n, \
                                     current_config->alt_name.link_m, \
                                     current_config->alt_name.link_n, \
                                     pipe_config->name.tu, \
                                     pipe_config->name.gmch_m, \
                                     pipe_config->name.gmch_n, \
                                     pipe_config->name.link_m, \
                                     pipe_config->name.link_n); \
                ret = false; \
        } \
} while (0)

/* Compare only the bits of a flags field selected by @mask. */
#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
        if ((current_config->name ^ pipe_config->name) & (mask)) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(%x) (expected %i, found %i)", \
                                     (mask), \
                                     current_config->name & (mask), \
                                     pipe_config->name & (mask)); \
                ret = false; \
        } \
} while (0)

/* Compare a clock field with ~5% tolerance (see intel_fuzzy_clock_check). */
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
        if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name), \
                                     "(expected %i, found %i)", \
                                     current_config->name, \
                                     pipe_config->name); \
                ret = false; \
        } \
} while (0)

/* Compare an infoframe bit-for-bit, dumping both on mismatch. */
#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
        if (!intel_compare_infoframe(&current_config->infoframes.name, \
                                     &pipe_config->infoframes.name)) { \
                pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
                                               &current_config->infoframes.name, \
                                               &pipe_config->infoframes.name); \
                ret = false; \
        } \
} while (0)

/*
 * Compare a LUT: first the mode field (@name1) must match, then the LUT
 * blobs (@name2) are compared at @bit_precision via intel_color_lut_equal().
 */
#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
        if (current_config->name1 != pipe_config->name1) { \
                pipe_config_mismatch(fastset, crtc, __stringify(name1), \
                                "(expected %i, found %i, won't compare lut values)", \
                                current_config->name1, \
                                pipe_config->name1); \
                ret = false;\
        } else { \
                if (!intel_color_lut_equal(current_config->name2, \
                                        pipe_config->name2, pipe_config->name1, \
                                        bit_precision)) { \
                        pipe_config_mismatch(fastset, crtc, __stringify(name2), \
                                        "hw_state doesn't match sw_state"); \
                        ret = false; \
                } \
        } \
} while (0)

/* True when either state carries the given quirk. */
#define PIPE_CONF_QUIRK(quirk) \
        ((current_config->quirks | pipe_config->quirks) & (quirk))

        PIPE_CONF_CHECK_I(cpu_transcoder);

        PIPE_CONF_CHECK_BOOL(has_pch_encoder);
        PIPE_CONF_CHECK_I(fdi_lanes);
        PIPE_CONF_CHECK_M_N(fdi_m_n);

        PIPE_CONF_CHECK_I(lane_count);
        PIPE_CONF_CHECK_X(lane_lat_optim_mask);

        if (INTEL_GEN(dev_priv) < 8) {
                PIPE_CONF_CHECK_M_N(dp_m_n);

                if (current_config->has_drrs)
                        PIPE_CONF_CHECK_M_N(dp_m2_n2);
        } else
                /* BDW+: one register set shared between high/low RR M/N. */
                PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

        PIPE_CONF_CHECK_X(output_types);

        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);

        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
        PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);

        PIPE_CONF_CHECK_I(pixel_multiplier);
        PIPE_CONF_CHECK_I(output_format);
        PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
        if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                PIPE_CONF_CHECK_BOOL(limited_color_range);

        PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
        PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
        PIPE_CONF_CHECK_BOOL(has_infoframe);
        PIPE_CONF_CHECK_BOOL(fec_enable);

        /* Audio enabling can't be fully read back; see the macro's comment. */
        PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);

        PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
                              DRM_MODE_FLAG_INTERLACE);

        if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
                PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
                                      DRM_MODE_FLAG_PHSYNC);
                PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
                                      DRM_MODE_FLAG_NHSYNC);
                PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
                                      DRM_MODE_FLAG_PVSYNC);
                PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
                                      DRM_MODE_FLAG_NVSYNC);
        }

        PIPE_CONF_CHECK_X(gmch_pfit.control);
        /* pfit ratios are autocomputed by the hw on gen4+ */
        if (INTEL_GEN(dev_priv) < 4)
                PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
        PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

        /*
         * Changing the EDP transcoder input mux
         * (A_ONOFF vs. A_ON) requires a full modeset.
         */
        PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);

        /* These fields may legitimately change during a fastset. */
        if (!fastset) {
                PIPE_CONF_CHECK_I(pipe_src_w);
                PIPE_CONF_CHECK_I(pipe_src_h);

                PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
                if (current_config->pch_pfit.enabled) {
                        PIPE_CONF_CHECK_X(pch_pfit.pos);
                        PIPE_CONF_CHECK_X(pch_pfit.size);
                }

                PIPE_CONF_CHECK_I(scaler_state.scaler_id);
                PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);

                PIPE_CONF_CHECK_X(gamma_mode);
                if (IS_CHERRYVIEW(dev_priv))
                        PIPE_CONF_CHECK_X(cgm_mode);
                else
                        PIPE_CONF_CHECK_X(csc_mode);
                PIPE_CONF_CHECK_BOOL(gamma_enable);
                PIPE_CONF_CHECK_BOOL(csc_enable);

                PIPE_CONF_CHECK_I(linetime);
                PIPE_CONF_CHECK_I(ips_linetime);

                /* bp_gamma == 0 means no gamma LUT to compare on this platform. */
                bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
                if (bp_gamma)
                        PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
        }

        PIPE_CONF_CHECK_BOOL(double_wide);

        PIPE_CONF_CHECK_P(shared_dpll);
        PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
        PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
        PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
        PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
        PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
        PIPE_CONF_CHECK_X(dpll_hw_state.spll);
        PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
        PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
        PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
        PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
        PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
        PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
        PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
        PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
        PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);

        PIPE_CONF_CHECK_X(dsi_pll.ctrl);
        PIPE_CONF_CHECK_X(dsi_pll.div);

        if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
                PIPE_CONF_CHECK_I(pipe_bpp);

        PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
        PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

        PIPE_CONF_CHECK_I(min_voltage_level);

        PIPE_CONF_CHECK_X(infoframes.enable);
        PIPE_CONF_CHECK_X(infoframes.gcp);
        PIPE_CONF_CHECK_INFOFRAME(avi);
        PIPE_CONF_CHECK_INFOFRAME(spd);
        PIPE_CONF_CHECK_INFOFRAME(hdmi);
        PIPE_CONF_CHECK_INFOFRAME(drm);

        PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
        PIPE_CONF_CHECK_I(master_transcoder);

        PIPE_CONF_CHECK_I(dsc.compression_enable);
        PIPE_CONF_CHECK_I(dsc.dsc_split);
        PIPE_CONF_CHECK_I(dsc.compressed_bpp);

        PIPE_CONF_CHECK_I(mst_master_transcoder);

/*
 * NOTE(review): PIPE_CONF_CHECK_M_N, PIPE_CONF_CHECK_M_N_ALT and
 * PIPE_CONF_CHECK_INFOFRAME are not #undef'd here and so stay defined
 * for the rest of this translation unit — confirm that is intentional.
 */
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_CHECK_COLOR_LUT
#undef PIPE_CONF_QUIRK

        return ret;
}
14015
14016 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
14017                                            const struct intel_crtc_state *pipe_config)
14018 {
14019         if (pipe_config->has_pch_encoder) {
14020                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
14021                                                             &pipe_config->fdi_m_n);
14022                 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
14023
14024                 /*
14025                  * FDI already provided one idea for the dotclock.
14026                  * Yell if the encoder disagrees.
14027                  */
14028                 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
14029                      "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
14030                      fdi_dotclock, dotclock);
14031         }
14032 }
14033
/*
 * Read back the SKL+ watermark and DDB allocation state from the hardware
 * and compare it against the software state in @new_crtc_state, logging an
 * error for every discrepancy (per-plane WM levels, transition WMs, DDB
 * entries, and on gen11+ the number of enabled DBUF slices).
 *
 * Only applies to gen9+ and only when the pipe is active. The hw snapshot
 * is heap-allocated because it is too large for the stack; on allocation
 * failure the verification is silently skipped.
 */
static void verify_wm_state(struct intel_crtc *crtc,
                            struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        /* Scratch snapshot of the hw state, mirrored from the sw layout. */
        struct skl_hw_state {
                struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
                struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
                struct skl_ddb_allocation ddb;
                struct skl_pipe_wm wm;
        } *hw;
        struct skl_ddb_allocation *sw_ddb;
        struct skl_pipe_wm *sw_wm;
        struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
        const enum pipe pipe = crtc->pipe;
        int plane, level, max_level = ilk_wm_max_level(dev_priv);

        if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
                return;

        hw = kzalloc(sizeof(*hw), GFP_KERNEL);
        if (!hw)
                return;

        skl_pipe_wm_get_hw_state(crtc, &hw->wm);
        sw_wm = &new_crtc_state->wm.skl.optimal;

        skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

        skl_ddb_get_hw_state(dev_priv, &hw->ddb);
        sw_ddb = &dev_priv->wm.skl_hw.ddb;

        if (INTEL_GEN(dev_priv) >= 11 &&
            hw->ddb.enabled_slices != sw_ddb->enabled_slices)
                drm_err(&dev_priv->drm,
                        "mismatch in DBUF Slices (expected %u, got %u)\n",
                        sw_ddb->enabled_slices,
                        hw->ddb.enabled_slices);

        /* planes */
        for_each_universal_plane(dev_priv, pipe, plane) {
                struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

                hw_plane_wm = &hw->wm.planes[plane];
                sw_plane_wm = &sw_wm->planes[plane];

                /* Watermarks */
                for (level = 0; level <= max_level; level++) {
                        if (skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                &sw_plane_wm->wm[level]))
                                continue;

                        drm_err(&dev_priv->drm,
                                "mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                pipe_name(pipe), plane + 1, level,
                                sw_plane_wm->wm[level].plane_en,
                                sw_plane_wm->wm[level].plane_res_b,
                                sw_plane_wm->wm[level].plane_res_l,
                                hw_plane_wm->wm[level].plane_en,
                                hw_plane_wm->wm[level].plane_res_b,
                                hw_plane_wm->wm[level].plane_res_l);
                }

                if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
                                         &sw_plane_wm->trans_wm)) {
                        drm_err(&dev_priv->drm,
                                "mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                pipe_name(pipe), plane + 1,
                                sw_plane_wm->trans_wm.plane_en,
                                sw_plane_wm->trans_wm.plane_res_b,
                                sw_plane_wm->trans_wm.plane_res_l,
                                hw_plane_wm->trans_wm.plane_en,
                                hw_plane_wm->trans_wm.plane_res_b,
                                hw_plane_wm->trans_wm.plane_res_l);
                }

                /* DDB */
                hw_ddb_entry = &hw->ddb_y[plane];
                sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];

                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        drm_err(&dev_priv->drm,
                                "mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
                                pipe_name(pipe), plane + 1,
                                sw_ddb_entry->start, sw_ddb_entry->end,
                                hw_ddb_entry->start, hw_ddb_entry->end);
                }
        }

        /*
         * cursor
         * If the cursor plane isn't active, we may not have updated its ddb
         * allocation. In that case since the ddb allocation will be updated
         * once the plane becomes visible, we can skip this check
         */
        /*
         * NOTE(review): the "if (1)" appears to be a vestige of an earlier
         * conditional matching the comment above — the cursor is currently
         * always checked; confirm against git history before simplifying.
         */
        if (1) {
                struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

                hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
                sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

                /* Watermarks */
                for (level = 0; level <= max_level; level++) {
                        if (skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                &sw_plane_wm->wm[level]))
                                continue;

                        drm_err(&dev_priv->drm,
                                "mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                pipe_name(pipe), level,
                                sw_plane_wm->wm[level].plane_en,
                                sw_plane_wm->wm[level].plane_res_b,
                                sw_plane_wm->wm[level].plane_res_l,
                                hw_plane_wm->wm[level].plane_en,
                                hw_plane_wm->wm[level].plane_res_b,
                                hw_plane_wm->wm[level].plane_res_l);
                }

                if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
                                         &sw_plane_wm->trans_wm)) {
                        drm_err(&dev_priv->drm,
                                "mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                pipe_name(pipe),
                                sw_plane_wm->trans_wm.plane_en,
                                sw_plane_wm->trans_wm.plane_res_b,
                                sw_plane_wm->trans_wm.plane_res_l,
                                hw_plane_wm->trans_wm.plane_en,
                                hw_plane_wm->trans_wm.plane_res_b,
                                hw_plane_wm->trans_wm.plane_res_l);
                }

                /* DDB */
                hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
                sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];

                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        drm_err(&dev_priv->drm,
                                "mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
                                pipe_name(pipe),
                                sw_ddb_entry->start, sw_ddb_entry->end,
                                hw_ddb_entry->start, hw_ddb_entry->end);
                }
        }

        kfree(hw);
}
14179
/*
 * For every connector in @state that is assigned to @crtc, verify that the
 * connector's state is consistent: intel_connector_verify_state() checks
 * the connector against its new crtc state, and the legacy encoder pointer
 * must agree with the atomic best_encoder.
 */
static void
verify_connector_state(struct intel_atomic_state *state,
                       struct intel_crtc *crtc)
{
        struct drm_connector *connector;
        struct drm_connector_state *new_conn_state;
        int i;

        for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
                struct drm_encoder *encoder = connector->encoder;
                struct intel_crtc_state *crtc_state = NULL;

                /* Only connectors routed to this crtc are of interest. */
                if (new_conn_state->crtc != &crtc->base)
                        continue;

                if (crtc)
                        crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

                intel_connector_verify_state(crtc_state, new_conn_state);

                I915_STATE_WARN(new_conn_state->best_encoder != encoder,
                     "connector's atomic encoder doesn't match legacy encoder\n");
        }
}
14204
14205 static void
14206 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
14207 {
14208         struct intel_encoder *encoder;
14209         struct drm_connector *connector;
14210         struct drm_connector_state *old_conn_state, *new_conn_state;
14211         int i;
14212
14213         for_each_intel_encoder(&dev_priv->drm, encoder) {
14214                 bool enabled = false, found = false;
14215                 enum pipe pipe;
14216
14217                 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
14218                             encoder->base.base.id,
14219                             encoder->base.name);
14220
14221                 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
14222                                                    new_conn_state, i) {
14223                         if (old_conn_state->best_encoder == &encoder->base)
14224                                 found = true;
14225
14226                         if (new_conn_state->best_encoder != &encoder->base)
14227                                 continue;
14228                         found = enabled = true;
14229
14230                         I915_STATE_WARN(new_conn_state->crtc !=
14231                                         encoder->base.crtc,
14232                              "connector's crtc doesn't match encoder crtc\n");
14233                 }
14234
14235                 if (!found)
14236                         continue;
14237
14238                 I915_STATE_WARN(!!encoder->base.crtc != enabled,
14239                      "encoder's enabled state mismatch "
14240                      "(expected %i, found %i)\n",
14241                      !!encoder->base.crtc, enabled);
14242
14243                 if (!encoder->base.crtc) {
14244                         bool active;
14245
14246                         active = encoder->get_hw_state(encoder, &pipe);
14247                         I915_STATE_WARN(active,
14248                              "encoder detached but still enabled on pipe %c.\n",
14249                              pipe_name(pipe));
14250                 }
14251         }
14252 }
14253
/*
 * Read the crtc state back from the hardware and compare it against the
 * software state that was just committed (atomic state checker).
 *
 * Note: @old_crtc_state is no longer needed at this point and is
 * destructively reused as scratch space for the hw readout.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
                  struct intel_crtc_state *old_crtc_state,
                  struct intel_crtc_state *new_crtc_state)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        struct intel_crtc_state *pipe_config = old_crtc_state;
        struct drm_atomic_state *state = old_crtc_state->uapi.state;
        bool active;

        /*
         * Wipe the old state so it can hold the hw readout, but preserve
         * the uapi.state backpointer which the atomic core still needs.
         */
        __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
        intel_crtc_free_hw_state(old_crtc_state);
        intel_crtc_state_reset(old_crtc_state, crtc);
        old_crtc_state->uapi.state = state;

        drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
                    crtc->base.name);

        active = dev_priv->display.get_pipe_config(crtc, pipe_config);

        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv))
                active = new_crtc_state->hw.active;

        I915_STATE_WARN(new_crtc_state->hw.active != active,
                        "crtc active state doesn't match with hw state "
                        "(expected %i, found %i)\n",
                        new_crtc_state->hw.active, active);

        I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
                        "transitional active state does not match atomic hw state "
                        "(expected %i, found %i)\n",
                        new_crtc_state->hw.active, crtc->active);

        for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
                enum pipe pipe;

                active = encoder->get_hw_state(encoder, &pipe);
                I915_STATE_WARN(active != new_crtc_state->hw.active,
                                "[ENCODER:%i] active %i with crtc active %i\n",
                                encoder->base.base.id, active,
                                new_crtc_state->hw.active);

                I915_STATE_WARN(active && crtc->pipe != pipe,
                                "Encoder connected to wrong pipe %c\n",
                                pipe_name(pipe));

                /* Let active encoders fill in the rest of the hw readout */
                if (active)
                        encoder->get_config(encoder, pipe_config);
        }

        intel_crtc_compute_pixel_rate(pipe_config);

        /* An inactive crtc has no further state worth comparing. */
        if (!new_crtc_state->hw.active)
                return;

        intel_pipe_config_sanity_check(dev_priv, pipe_config);

        /* Full (non-fuzzy) comparison of hw readout vs. committed sw state */
        if (!intel_pipe_config_compare(new_crtc_state,
                                       pipe_config, false)) {
                I915_STATE_WARN(1, "pipe state doesn't match!\n");
                intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
                intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
        }
}
14321
14322 static void
14323 intel_verify_planes(struct intel_atomic_state *state)
14324 {
14325         struct intel_plane *plane;
14326         const struct intel_plane_state *plane_state;
14327         int i;
14328
14329         for_each_new_intel_plane_in_state(state, plane,
14330                                           plane_state, i)
14331                 assert_plane(plane, plane_state->planar_slave ||
14332                              plane_state->uapi.visible);
14333 }
14334
/*
 * Compare the software tracking of one shared DPLL against its hardware
 * state. When @crtc/@new_crtc_state are provided, the per-crtc reference
 * and active masks are verified as well; with both NULL only the global
 * pll bookkeeping is checked (used by verify_disabled_dpll_state()).
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
                         struct intel_shared_dpll *pll,
                         struct intel_crtc *crtc,
                         struct intel_crtc_state *new_crtc_state)
{
        struct intel_dpll_hw_state dpll_hw_state;
        unsigned int crtc_mask;
        bool active;

        memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

        drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

        active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

        /* Always-on plls may legitimately be on with no active users. */
        if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
                I915_STATE_WARN(!pll->on && pll->active_mask,
                     "pll in active use but not on in sw tracking\n");
                I915_STATE_WARN(pll->on && !pll->active_mask,
                     "pll is on but not used by any active crtc\n");
                I915_STATE_WARN(pll->on != active,
                     "pll on state mismatch (expected %i, found %i)\n",
                     pll->on, active);
        }

        if (!crtc) {
                /* Global check only: active users must be a subset of refs. */
                I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
                                "more active pll users than references: %x vs %x\n",
                                pll->active_mask, pll->state.crtc_mask);

                return;
        }

        crtc_mask = drm_crtc_mask(&crtc->base);

        if (new_crtc_state->hw.active)
                I915_STATE_WARN(!(pll->active_mask & crtc_mask),
                                "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
                                pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
        else
                I915_STATE_WARN(pll->active_mask & crtc_mask,
                                "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
                                pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);

        I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
                        "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
                        crtc_mask, pll->state.crtc_mask);

        /* The pll hw state is only meaningful while the pll is on. */
        I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
                                          &dpll_hw_state,
                                          sizeof(dpll_hw_state)),
                        "pll hw state mismatch\n");
}
14389
14390 static void
14391 verify_shared_dpll_state(struct intel_crtc *crtc,
14392                          struct intel_crtc_state *old_crtc_state,
14393                          struct intel_crtc_state *new_crtc_state)
14394 {
14395         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14396
14397         if (new_crtc_state->shared_dpll)
14398                 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
14399
14400         if (old_crtc_state->shared_dpll &&
14401             old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
14402                 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
14403                 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
14404
14405                 I915_STATE_WARN(pll->active_mask & crtc_mask,
14406                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
14407                                 pipe_name(drm_crtc_index(&crtc->base)));
14408                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
14409                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
14410                                 pipe_name(drm_crtc_index(&crtc->base)));
14411         }
14412 }
14413
14414 static void
14415 intel_modeset_verify_crtc(struct intel_crtc *crtc,
14416                           struct intel_atomic_state *state,
14417                           struct intel_crtc_state *old_crtc_state,
14418                           struct intel_crtc_state *new_crtc_state)
14419 {
14420         if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
14421                 return;
14422
14423         verify_wm_state(crtc, new_crtc_state);
14424         verify_connector_state(state, crtc);
14425         verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
14426         verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
14427 }
14428
14429 static void
14430 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
14431 {
14432         int i;
14433
14434         for (i = 0; i < dev_priv->num_shared_dpll; i++)
14435                 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
14436 }
14437
/*
 * Verify the parts of the state that aren't tied to an enabled crtc:
 * encoder state, connectors with no crtc (passing NULL selects those in
 * verify_connector_state()), and the shared DPLL global bookkeeping.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
                              struct intel_atomic_state *state)
{
        verify_encoder_state(dev_priv, state);
        verify_connector_state(state, NULL);
        verify_disabled_dpll_state(dev_priv);
}
14446
/*
 * Update the vblank timestamping constants and the scanline counter
 * offset for the crtc's newly-committed timings. Must be done whenever
 * the active mode changes so that scanline/vblank queries stay accurate.
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->hw.adjusted_mode;

        drm_calc_timestamping_constants(&crtc->base, adjusted_mode);

        /*
         * The scanline counter increments at the leading edge of hsync.
         *
         * On most platforms it starts counting from vtotal-1 on the
         * first active line. That means the scanline counter value is
         * always one less than what we would expect. Ie. just after
         * start of vblank, which also occurs at start of hsync (on the
         * last active line), the scanline counter will read vblank_start-1.
         *
         * On gen2 the scanline counter starts counting from 1 instead
         * of vtotal-1, so we have to subtract one (or rather add vtotal-1
         * to keep the value positive), instead of adding one.
         *
         * On HSW+ the behaviour of the scanline counter depends on the output
         * type. For DP ports it behaves like most other platforms, but on HDMI
         * there's an extra 1 line difference. So we need to add two instead of
         * one to the value.
         *
         * On VLV/CHV DSI the scanline counter would appear to increment
         * approx. 1/3 of a scanline before start of vblank. Unfortunately
         * that means we can't tell whether we're in vblank or not while
         * we're on that particular line. We must still set scanline_offset
         * to 1 so that the vblank timestamps come out correct when we query
         * the scanline counter from within the vblank interrupt handler.
         * However if queried just before the start of vblank we'll get an
         * answer that's slightly in the future.
         */
        if (IS_GEN(dev_priv, 2)) {
                int vtotal;

                vtotal = adjusted_mode->crtc_vtotal;
                if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
                        vtotal /= 2;

                crtc->scanline_offset = vtotal - 1;
        } else if (HAS_DDI(dev_priv) &&
                   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
                crtc->scanline_offset = 2;
        } else {
                crtc->scanline_offset = 1;
        }
}
14499
14500 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
14501 {
14502         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14503         struct intel_crtc_state *new_crtc_state;
14504         struct intel_crtc *crtc;
14505         int i;
14506
14507         if (!dev_priv->display.crtc_compute_clock)
14508                 return;
14509
14510         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14511                 if (!needs_modeset(new_crtc_state))
14512                         continue;
14513
14514                 intel_release_shared_dplls(state, crtc);
14515         }
14516 }
14517
14518 /*
14519  * This implements the workaround described in the "notes" section of the mode
14520  * set sequence documentation. When going from no pipes or single pipe to
14521  * multiple pipes, and planes are enabled after the pipe, we need to wait at
14522  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
14523  */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
        struct intel_crtc_state *crtc_state;
        struct intel_crtc *crtc;
        struct intel_crtc_state *first_crtc_state = NULL;
        struct intel_crtc_state *other_crtc_state = NULL;
        enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
        int i;

        /* look at all crtc's that are going to be enabled in during modeset */
        for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
                if (!crtc_state->hw.active ||
                    !needs_modeset(crtc_state))
                        continue;

                /* Remember the first two crtcs being enabled. */
                if (first_crtc_state) {
                        other_crtc_state = crtc_state;
                        break;
                } else {
                        first_crtc_state = crtc_state;
                        first_pipe = crtc->pipe;
                }
        }

        /* No workaround needed? */
        if (!first_crtc_state)
                return 0;

        /* w/a possibly needed, check how many crtc's are already enabled. */
        for_each_intel_crtc(state->base.dev, crtc) {
                /* Note: this pulls every crtc into the atomic state. */
                crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
                if (IS_ERR(crtc_state))
                        return PTR_ERR(crtc_state);

                crtc_state->hsw_workaround_pipe = INVALID_PIPE;

                /* Only count crtcs that stay enabled across this commit. */
                if (!crtc_state->hw.active ||
                    needs_modeset(crtc_state))
                        continue;

                /* 2 or more enabled crtcs means no need for w/a */
                if (enabled_pipe != INVALID_PIPE)
                        return 0;

                enabled_pipe = crtc->pipe;
        }

        /*
         * Going from one enabled pipe to two: the newly enabled pipe waits
         * on the already-enabled one. Going from zero to two: the second
         * new pipe waits on the first.
         */
        if (enabled_pipe != INVALID_PIPE)
                first_crtc_state->hsw_workaround_pipe = enabled_pipe;
        else if (other_crtc_state)
                other_crtc_state->hsw_workaround_pipe = first_pipe;

        return 0;
}
14578
14579 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
14580                            u8 active_pipes)
14581 {
14582         const struct intel_crtc_state *crtc_state;
14583         struct intel_crtc *crtc;
14584         int i;
14585
14586         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14587                 if (crtc_state->hw.active)
14588                         active_pipes |= BIT(crtc->pipe);
14589                 else
14590                         active_pipes &= ~BIT(crtc->pipe);
14591         }
14592
14593         return active_pipes;
14594 }
14595
/*
 * Checks that only apply when some crtc needs a full modeset: recompute
 * the active pipe mask, serialize against other commits when it changes,
 * recompute cdclk, and release stale shared DPLL references.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_checks(struct intel_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        int ret;

        state->modeset = true;
        state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes);

        /* Bits set here correspond to pipes turning on or off */
        state->active_pipe_changes = state->active_pipes ^ dev_priv->active_pipes;

        if (state->active_pipe_changes) {
                ret = _intel_atomic_lock_global_state(state);
                if (ret)
                        return ret;
        }

        ret = intel_modeset_calc_cdclk(state);
        if (ret)
                return ret;

        intel_modeset_clear_plls(state);

        /* See the comment above hsw_mode_set_planes_workaround() */
        if (IS_HASWELL(dev_priv))
                return hsw_mode_set_planes_workaround(state);

        return 0;
}
14623
14624 /*
14625  * Handle calculation of various watermark data at the end of the atomic check
14626  * phase.  The code here should be run after the per-crtc and per-plane 'check'
14627  * handlers to ensure that all derived state has been updated.
14628  */
14629 static int calc_watermark_data(struct intel_atomic_state *state)
14630 {
14631         struct drm_device *dev = state->base.dev;
14632         struct drm_i915_private *dev_priv = to_i915(dev);
14633
14634         /* Is there platform-specific watermark information to calculate? */
14635         if (dev_priv->display.compute_global_watermarks)
14636                 return dev_priv->display.compute_global_watermarks(state);
14637
14638         return 0;
14639 }
14640
14641 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
14642                                      struct intel_crtc_state *new_crtc_state)
14643 {
14644         if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
14645                 return;
14646
14647         new_crtc_state->uapi.mode_changed = false;
14648         new_crtc_state->update_pipe = true;
14649 }
14650
/*
 * Carry state over from the old crtc state when doing a fastset instead
 * of a full modeset.
 */
static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
                                    struct intel_crtc_state *new_crtc_state)
{
        /*
         * If we're not doing the full modeset we want to
         * keep the current M/N values as they may be
         * sufficiently different to the computed values
         * to cause problems.
         *
         * FIXME: should really copy more fuzzy state here
         */
        new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
        new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
        new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
        new_crtc_state->has_drrs = old_crtc_state->has_drrs;
}
14667
14668 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
14669                                           struct intel_crtc *crtc,
14670                                           u8 plane_ids_mask)
14671 {
14672         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14673         struct intel_plane *plane;
14674
14675         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
14676                 struct intel_plane_state *plane_state;
14677
14678                 if ((plane_ids_mask & BIT(plane->id)) == 0)
14679                         continue;
14680
14681                 plane_state = intel_atomic_get_plane_state(state, plane);
14682                 if (IS_ERR(plane_state))
14683                         return PTR_ERR(plane_state);
14684         }
14685
14686         return 0;
14687 }
14688
14689 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
14690 {
14691         /* See {hsw,vlv,ivb}_plane_ratio() */
14692         return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
14693                 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
14694                 IS_IVYBRIDGE(dev_priv);
14695 }
14696
/*
 * Run the atomic checks for all planes in @state and determine whether
 * the plane changes require recomputing cdclk (*need_cdclk_calc).
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state,
                                     bool *need_cdclk_calc)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc_state *old_crtc_state, *new_crtc_state;
        struct intel_plane_state *plane_state;
        struct intel_plane *plane;
        struct intel_crtc *crtc;
        int i, ret;

        ret = icl_add_linked_planes(state);
        if (ret)
                return ret;

        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                ret = intel_plane_atomic_check(state, plane);
                if (ret) {
                        drm_dbg_atomic(&dev_priv->drm,
                                       "[PLANE:%d:%s] atomic driver check failed\n",
                                       plane->base.base.id, plane->base.name);
                        return ret;
                }
        }

        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                u8 old_active_planes, new_active_planes;

                ret = icl_check_nv12_planes(new_crtc_state);
                if (ret)
                        return ret;

                /*
                 * On some platforms the number of active planes affects
                 * the planes' minimum cdclk calculation. Add such planes
                 * to the state before we compute the minimum cdclk.
                 */
                if (!active_planes_affects_min_cdclk(dev_priv))
                        continue;

                /* The cursor plane doesn't count for this (see *_plane_ratio()) */
                old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
                new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

                /* Only the plane count matters, not which planes they are. */
                if (hweight8(old_active_planes) == hweight8(new_active_planes))
                        continue;

                ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
                if (ret)
                        return ret;
        }

        /*
         * active_planes bitmask has been updated, and potentially
         * affected planes are part of the state. We can now
         * compute the minimum cdclk for each plane.
         */
        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
                if (ret)
                        return ret;
        }

        return 0;
}
14761
14762 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
14763 {
14764         struct intel_crtc_state *crtc_state;
14765         struct intel_crtc *crtc;
14766         int i;
14767
14768         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14769                 int ret = intel_crtc_atomic_check(state, crtc);
14770                 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
14771                 if (ret) {
14772                         drm_dbg_atomic(&i915->drm,
14773                                        "[CRTC:%d:%s] atomic driver check failed\n",
14774                                        crtc->base.base.id, crtc->base.name);
14775                         return ret;
14776                 }
14777         }
14778
14779         return 0;
14780 }
14781
14782 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
14783                                                u8 transcoders)
14784 {
14785         const struct intel_crtc_state *new_crtc_state;
14786         struct intel_crtc *crtc;
14787         int i;
14788
14789         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14790                 if (new_crtc_state->hw.enable &&
14791                     transcoders & BIT(new_crtc_state->cpu_transcoder) &&
14792                     needs_modeset(new_crtc_state))
14793                         return true;
14794         }
14795
14796         return false;
14797 }
14798
/*
 * Force a full modeset on every crtc driving a connector in the tile
 * group @tile_grp_id, pulling those connectors and crtcs into @state.
 *
 * Returns 0 on success or a negative error code.
 */
static int
intel_modeset_all_tiles(struct intel_atomic_state *state, int tile_grp_id)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        int ret = 0;

        drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                struct drm_connector_state *conn_state;
                struct drm_crtc_state *crtc_state;

                /* Skip connectors outside the requested tile group. */
                if (!connector->has_tile ||
                    connector->tile_group->id != tile_grp_id)
                        continue;
                conn_state = drm_atomic_get_connector_state(&state->base,
                                                            connector);
                if (IS_ERR(conn_state)) {
                        ret =  PTR_ERR(conn_state);
                        break;
                }

                if (!conn_state->crtc)
                        continue;

                crtc_state = drm_atomic_get_crtc_state(&state->base,
                                                       conn_state->crtc);
                if (IS_ERR(crtc_state)) {
                        ret = PTR_ERR(crtc_state);
                        break;
                }
                /* Setting mode_changed here forces the full modeset. */
                crtc_state->mode_changed = true;
                ret = drm_atomic_add_affected_connectors(&state->base,
                                                         conn_state->crtc);
                if (ret)
                        break;
        }
        /* Must end the iterator even on the error paths above. */
        drm_connector_list_iter_end(&conn_iter);

        return ret;
}
14841
14842 static int
14843 intel_atomic_check_tiled_conns(struct intel_atomic_state *state)
14844 {
14845         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14846         struct drm_connector *connector;
14847         struct drm_connector_state *old_conn_state, *new_conn_state;
14848         int i, ret;
14849
14850         if (INTEL_GEN(dev_priv) < 11)
14851                 return 0;
14852
14853         /* Is tiled, mark all other tiled CRTCs as needing a modeset */
14854         for_each_oldnew_connector_in_state(&state->base, connector,
14855                                            old_conn_state, new_conn_state, i) {
14856                 if (!connector->has_tile)
14857                         continue;
14858                 if (!intel_connector_needs_modeset(state, connector))
14859                         continue;
14860
14861                 ret = intel_modeset_all_tiles(state, connector->tile_group->id);
14862                 if (ret)
14863                         return ret;
14864         }
14865
14866         return 0;
14867 }
14868
14869 /**
14870  * intel_atomic_check - validate state object
14871  * @dev: drm device
14872  * @_state: state to validate
14873  */
14874 static int intel_atomic_check(struct drm_device *dev,
14875                               struct drm_atomic_state *_state)
14876 {
14877         struct drm_i915_private *dev_priv = to_i915(dev);
14878         struct intel_atomic_state *state = to_intel_atomic_state(_state);
14879         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14880         struct intel_cdclk_state *new_cdclk_state;
14881         struct intel_crtc *crtc;
14882         int ret, i;
14883         bool any_ms = false;
14884
14885         /* Catch I915_MODE_FLAG_INHERITED */
14886         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14887                                             new_crtc_state, i) {
14888                 if (new_crtc_state->hw.mode.private_flags !=
14889                     old_crtc_state->hw.mode.private_flags)
14890                         new_crtc_state->uapi.mode_changed = true;
14891         }
14892
14893         ret = drm_atomic_helper_check_modeset(dev, &state->base);
14894         if (ret)
14895                 goto fail;
14896
14897         /**
14898          * This check adds all the connectors in current state that belong to
14899          * the same tile group to a full modeset.
14900          * This function directly sets the mode_changed to true and we also call
14901          * drm_atomic_add_affected_connectors(). Hence we are not explicitly
14902          * calling drm_atomic_helper_check_modeset() after this.
14903          *
14904          * Fixme: Handle some corner cases where one of the
14905          * tiled connectors gets disconnected and tile info is lost but since it
14906          * was previously synced to other conn, we need to add that to the modeset.
14907          */
14908         ret = intel_atomic_check_tiled_conns(state);
14909         if (ret)
14910                 goto fail;
14911
14912         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14913                                             new_crtc_state, i) {
14914                 if (!needs_modeset(new_crtc_state)) {
14915                         /* Light copy */
14916                         intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state);
14917
14918                         continue;
14919                 }
14920
14921                 ret = intel_crtc_prepare_cleared_state(new_crtc_state);
14922                 if (ret)
14923                         goto fail;
14924
14925                 if (!new_crtc_state->hw.enable)
14926                         continue;
14927
14928                 ret = intel_modeset_pipe_config(new_crtc_state);
14929                 if (ret)
14930                         goto fail;
14931
14932                 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
14933         }
14934
14935         /**
14936          * Check if fastset is allowed by external dependencies like other
14937          * pipes and transcoders.
14938          *
14939          * Right now it only forces a fullmodeset when the MST master
14940          * transcoder did not changed but the pipe of the master transcoder
14941          * needs a fullmodeset so all slaves also needs to do a fullmodeset or
14942          * in case of port synced crtcs, if one of the synced crtcs
14943          * needs a full modeset, all other synced crtcs should be
14944          * forced a full modeset.
14945          */
14946         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14947                 if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state))
14948                         continue;
14949
14950                 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
14951                         enum transcoder master = new_crtc_state->mst_master_transcoder;
14952
14953                         if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
14954                                 new_crtc_state->uapi.mode_changed = true;
14955                                 new_crtc_state->update_pipe = false;
14956                         }
14957                 }
14958
14959                 if (is_trans_port_sync_mode(new_crtc_state)) {
14960                         u8 trans = new_crtc_state->sync_mode_slaves_mask;
14961
14962                         if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
14963                                 trans |= BIT(new_crtc_state->master_transcoder);
14964
14965                         if (intel_cpu_transcoders_need_modeset(state, trans)) {
14966                                 new_crtc_state->uapi.mode_changed = true;
14967                                 new_crtc_state->update_pipe = false;
14968                         }
14969                 }
14970         }
14971
14972         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14973                                             new_crtc_state, i) {
14974                 if (needs_modeset(new_crtc_state)) {
14975                         any_ms = true;
14976                         continue;
14977                 }
14978
14979                 if (!new_crtc_state->update_pipe)
14980                         continue;
14981
14982                 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
14983         }
14984
14985         if (any_ms && !check_digital_port_conflicts(state)) {
14986                 drm_dbg_kms(&dev_priv->drm,
14987                             "rejecting conflicting digital port configuration\n");
14988                 ret = EINVAL;
14989                 goto fail;
14990         }
14991
14992         ret = drm_dp_mst_atomic_check(&state->base);
14993         if (ret)
14994                 goto fail;
14995
14996         ret = intel_atomic_check_planes(state, &any_ms);
14997         if (ret)
14998                 goto fail;
14999
15000         new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
15001         if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed)
15002                 any_ms = true;
15003
15004         if (any_ms) {
15005                 ret = intel_modeset_checks(state);
15006                 if (ret)
15007                         goto fail;
15008         }
15009
15010         ret = intel_atomic_check_crtcs(state);
15011         if (ret)
15012                 goto fail;
15013
15014         intel_fbc_choose_crtc(dev_priv, state);
15015         ret = calc_watermark_data(state);
15016         if (ret)
15017                 goto fail;
15018
15019         ret = intel_bw_atomic_check(state);
15020         if (ret)
15021                 goto fail;
15022
15023         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
15024                                             new_crtc_state, i) {
15025                 if (!needs_modeset(new_crtc_state) &&
15026                     !new_crtc_state->update_pipe)
15027                         continue;
15028
15029                 intel_dump_pipe_config(new_crtc_state, state,
15030                                        needs_modeset(new_crtc_state) ?
15031                                        "[modeset]" : "[fastset]");
15032         }
15033
15034         return 0;
15035
15036  fail:
15037         if (ret == -EDEADLK)
15038                 return ret;
15039
15040         /*
15041          * FIXME would probably be nice to know which crtc specifically
15042          * caused the failure, in cases where we can pinpoint it.
15043          */
15044         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
15045                                             new_crtc_state, i)
15046                 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
15047
15048         return ret;
15049 }
15050
15051 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
15052 {
15053         return drm_atomic_helper_prepare_planes(state->base.dev,
15054                                                 &state->base);
15055 }
15056
15057 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
15058 {
15059         struct drm_device *dev = crtc->base.dev;
15060         struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
15061
15062         if (!vblank->max_vblank_count)
15063                 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
15064
15065         return crtc->base.funcs->get_vblank_counter(&crtc->base);
15066 }
15067
15068 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
15069                                   struct intel_crtc_state *crtc_state)
15070 {
15071         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
15072
15073         if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
15074                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
15075
15076         if (crtc_state->has_pch_encoder) {
15077                 enum pipe pch_transcoder =
15078                         intel_crtc_pch_transcoder(crtc);
15079
15080                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
15081         }
15082 }
15083
/*
 * Apply the pieces of pipe state that can be updated without a full
 * modeset ("fastset"): pipe source size, panel fitter, linetime
 * watermarks and the ICL+ pipe chicken bits.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
                               const struct intel_crtc_state *new_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        /*
         * Update pipe size and adjust fitter if needed: the reason for this is
         * that in compute_mode_changes we check the native mode (not the pfit
         * mode) to see if we can flip rather than do a full mode set. In the
         * fastboot case, we'll flip, but if we don't update the pipesrc and
         * pfit state, we'll end up with a big fb scanned out into the wrong
         * sized surface.
         */
        intel_set_pipe_src_size(new_crtc_state);

        /* on skylake this is done by detaching scalers */
        if (INTEL_GEN(dev_priv) >= 9) {
                skl_detach_scalers(new_crtc_state);

                if (new_crtc_state->pch_pfit.enabled)
                        skl_pfit_enable(new_crtc_state);
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                /* Enable, or disable a previously enabled, PCH panel fitter. */
                if (new_crtc_state->pch_pfit.enabled)
                        ilk_pfit_enable(new_crtc_state);
                else if (old_crtc_state->pch_pfit.enabled)
                        ilk_pfit_disable(old_crtc_state);
        }

        /*
         * The register is supposedly single buffered so perhaps
         * not 100% correct to do this here. But SKL+ calculate
         * this based on the adjust pixel rate so pfit changes do
         * affect it and so it must be updated for fastsets.
         * HSW/BDW only really need this here for fastboot, after
         * that the value should not change without a full modeset.
         */
        if (INTEL_GEN(dev_priv) >= 9 ||
            IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
                hsw_set_linetime_wm(new_crtc_state);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_set_pipe_chicken(crtc);
}
15128
/*
 * Program pipe-level configuration for a single CRTC during commit.
 * Modesets have already programmed the pipe in the crtc_enable path,
 * so only the non-modeset (fastset / color management) cases touch the
 * pipe registers here; watermarks are updated unconditionally.
 */
static void commit_pipe_config(struct intel_atomic_state *state,
                               struct intel_crtc_state *old_crtc_state,
                               struct intel_crtc_state *new_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        bool modeset = needs_modeset(new_crtc_state);

        /*
         * During modesets pipe configuration was programmed as the
         * CRTC was enabled.
         */
        if (!modeset) {
                if (new_crtc_state->uapi.color_mgmt_changed ||
                    new_crtc_state->update_pipe)
                        intel_color_commit(new_crtc_state);

                if (INTEL_GEN(dev_priv) >= 9)
                        skl_detach_scalers(new_crtc_state);

                if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
                        bdw_set_pipemisc(new_crtc_state);

                /* pipe src size / pfit / linetime etc. */
                if (new_crtc_state->update_pipe)
                        intel_pipe_fastset(old_crtc_state, new_crtc_state);
        }

        if (dev_priv->display.atomic_update_watermarks)
                dev_priv->display.atomic_update_watermarks(state, crtc);
}
15159
/*
 * Commit the new state of a single CRTC: run the full enable sequence
 * for modesets, or the pre-plane/fastset path otherwise, then commit
 * the pipe config and plane updates inside the vblank-evasion critical
 * section.
 */
static void intel_update_crtc(struct intel_crtc *crtc,
                              struct intel_atomic_state *state,
                              struct intel_crtc_state *old_crtc_state,
                              struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        bool modeset = needs_modeset(new_crtc_state);

        if (modeset) {
                intel_crtc_update_active_timings(new_crtc_state);

                dev_priv->display.crtc_enable(state, crtc);

                /* vblanks work again, re-enable pipe CRC. */
                intel_crtc_enable_pipe_crc(crtc);
        } else {
                /* Load LUTs ahead of the vblank-evasion critical section. */
                if (new_crtc_state->preload_luts &&
                    (new_crtc_state->uapi.color_mgmt_changed ||
                     new_crtc_state->update_pipe))
                        intel_color_load_luts(new_crtc_state);

                intel_pre_plane_update(state, crtc);

                if (new_crtc_state->update_pipe)
                        intel_encoders_update_pipe(state, crtc);
        }

        /* Keep FBC in sync with the new configuration. */
        if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
                intel_fbc_disable(crtc);
        else
                intel_fbc_enable(state, crtc);

        /* Perform vblank evasion around commit operation */
        intel_pipe_update_start(new_crtc_state);

        commit_pipe_config(state, old_crtc_state, new_crtc_state);

        if (INTEL_GEN(dev_priv) >= 9)
                skl_update_planes_on_crtc(state, crtc);
        else
                i9xx_update_planes_on_crtc(state, crtc);

        intel_pipe_update_end(new_crtc_state);

        /*
         * We usually enable FIFO underrun interrupts as part of the
         * CRTC enable sequence during modesets.  But when we inherit a
         * valid pipe configuration from the BIOS we need to take care
         * of enabling them on the CRTC's first fastset.
         */
        if (new_crtc_state->update_pipe && !modeset &&
            old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
                intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
15214
15215 static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
15216 {
15217         struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
15218         enum transcoder slave_transcoder;
15219
15220         WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
15221
15222         slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
15223         return intel_get_crtc_for_pipe(dev_priv,
15224                                        (enum pipe)slave_transcoder);
15225 }
15226
/*
 * Tear down a CRTC that is active in the old state: planes first, then
 * pipe CRC, the CRTC itself, FBC, and finally its shared DPLL.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
                                          struct intel_crtc_state *old_crtc_state,
                                          struct intel_crtc_state *new_crtc_state,
                                          struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);

        intel_crtc_disable_planes(state, crtc);

        /*
         * We need to disable pipe CRC before disabling the pipe,
         * or we race against vblank off.
         */
        intel_crtc_disable_pipe_crc(crtc);

        dev_priv->display.crtc_disable(state, crtc);
        crtc->active = false;
        intel_fbc_disable(crtc);
        intel_disable_shared_dpll(old_crtc_state);

        /* FIXME unify this for all platforms */
        if (!new_crtc_state->hw.active &&
            !HAS_GMCH(dev_priv) &&
            dev_priv->display.initial_watermarks)
                dev_priv->display.initial_watermarks(state, crtc);
}
15253
/*
 * Disable every CRTC that needs a modeset, in two passes: port sync /
 * MST slave CRTCs first, then everything else that is still enabled.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
        struct intel_crtc_state *new_crtc_state, *old_crtc_state;
        struct intel_crtc *crtc;
        u32 handled = 0; /* bitmask of pipes disabled in the first pass */
        int i;

        /* Only disable port sync and MST slaves */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                if (!needs_modeset(new_crtc_state))
                        continue;

                if (!old_crtc_state->hw.active)
                        continue;

                /* In case of Transcoder port Sync master slave CRTCs can be
                 * assigned in any order and we need to make sure that
                 * slave CRTCs are disabled first and then master CRTC since
                 * Slave vblanks are masked till Master Vblanks.
                 */
                if (!is_trans_port_sync_slave(old_crtc_state) &&
                    !intel_dp_mst_is_slave_trans(old_crtc_state))
                        continue;

                intel_pre_plane_update(state, crtc);
                intel_old_crtc_state_disables(state, old_crtc_state,
                                              new_crtc_state, crtc);
                handled |= BIT(crtc->pipe);
        }

        /* Disable everything else left on */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                if (!needs_modeset(new_crtc_state) ||
                    (handled & BIT(crtc->pipe)))
                        continue;

                intel_pre_plane_update(state, crtc);
                if (old_crtc_state->hw.active)
                        intel_old_crtc_state_disables(state, old_crtc_state,
                                                      new_crtc_state, crtc);
        }
}
15298
15299 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
15300 {
15301         struct intel_crtc *crtc;
15302         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
15303         int i;
15304
15305         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
15306                 if (!new_crtc_state->hw.active)
15307                         continue;
15308
15309                 intel_update_crtc(crtc, state, old_crtc_state,
15310                                   new_crtc_state);
15311         }
15312 }
15313
15314 static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
15315                                               struct intel_atomic_state *state,
15316                                               struct intel_crtc_state *new_crtc_state)
15317 {
15318         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
15319
15320         intel_crtc_update_active_timings(new_crtc_state);
15321         dev_priv->display.crtc_enable(state, crtc);
15322         intel_crtc_enable_pipe_crc(crtc);
15323 }
15324
/*
 * Take the DP encoder driving @crtc out of the Idle link state by
 * stopping link training (which sets DP_TP_CTL to Normal).
 *
 * NOTE(review): assumes the new state always contains a connector
 * bound to @crtc. If no connector matches, the loop falls through and
 * @conn is left pointing at the last iterated (or no) connector —
 * confirm all callers guarantee a match.
 */
static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
                                       struct intel_atomic_state *state)
{
        struct drm_connector *uninitialized_var(conn);
        struct drm_connector_state *conn_state;
        struct intel_dp *intel_dp;
        int i;

        /* Find the connector assigned to this CRTC in the new state. */
        for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
                if (conn_state->crtc == &crtc->base)
                        break;
        }
        intel_dp = intel_attached_dp(to_intel_connector(conn));
        intel_dp_stop_link_train(intel_dp);
}
15340
15341 /*
15342  * TODO: This is only called from port sync and it is identical to what will be
15343  * executed again in intel_update_crtc() over port sync pipes
15344  */
15345 static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
15346                                            struct intel_atomic_state *state)
15347 {
15348         struct intel_crtc_state *new_crtc_state =
15349                 intel_atomic_get_new_crtc_state(state, crtc);
15350         struct intel_crtc_state *old_crtc_state =
15351                 intel_atomic_get_old_crtc_state(state, crtc);
15352         bool modeset = needs_modeset(new_crtc_state);
15353
15354         if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
15355                 intel_fbc_disable(crtc);
15356         else
15357                 intel_fbc_enable(state, crtc);
15358
15359         /* Perform vblank evasion around commit operation */
15360         intel_pipe_update_start(new_crtc_state);
15361         commit_pipe_config(state, old_crtc_state, new_crtc_state);
15362         skl_update_planes_on_crtc(state, crtc);
15363         intel_pipe_update_end(new_crtc_state);
15364
15365         /*
15366          * We usually enable FIFO underrun interrupts as part of the
15367          * CRTC enable sequence during modesets.  But when we inherit a
15368          * valid pipe configuration from the BIOS we need to take care
15369          * of enabling them on the CRTC's first fastset.
15370          */
15371         if (new_crtc_state->update_pipe && !modeset &&
15372             old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
15373                 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
15374 }
15375
/*
 * Enable a transcoder port sync master CRTC together with its slave.
 * Both CRTCs are enabled with DP_TP_CTL left in Idle, then the slave's
 * and master's DP_TP_CTL are switched to Normal, and finally the
 * post-enable pipe/plane updates run for both.
 */
static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
                                               struct intel_atomic_state *state,
                                               struct intel_crtc_state *old_crtc_state,
                                               struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *i915 = to_i915(crtc->base.dev);
        struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
        struct intel_crtc_state *new_slave_crtc_state =
                intel_atomic_get_new_crtc_state(state, slave_crtc);
        struct intel_crtc_state *old_slave_crtc_state =
                intel_atomic_get_old_crtc_state(state, slave_crtc);

        WARN_ON(!slave_crtc || !new_slave_crtc_state ||
                !old_slave_crtc_state);

        drm_dbg_kms(&i915->drm,
                    "Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
                    crtc->base.base.id, crtc->base.name,
                    slave_crtc->base.base.id, slave_crtc->base.name);

        /* Enable seq for slave with DP_TP_CTL left Idle until the
         * master is ready
         */
        intel_crtc_enable_trans_port_sync(slave_crtc,
                                          state,
                                          new_slave_crtc_state);

        /* Enable seq for master with DP_TP_CTL left Idle */
        intel_crtc_enable_trans_port_sync(crtc,
                                          state,
                                          new_crtc_state);

        /* Set Slave's DP_TP_CTL to Normal */
        intel_set_dp_tp_ctl_normal(slave_crtc,
                                   state);

        /* Set Master's DP_TP_CTL To Normal */
        usleep_range(200, 400);
        intel_set_dp_tp_ctl_normal(crtc,
                                   state);

        /* Now do the post crtc enable for all master and slaves */
        intel_post_crtc_enable_updates(slave_crtc,
                                       state);
        intel_post_crtc_enable_updates(crtc,
                                       state);
}
15423
/*
 * skl+ commit path: update/enable CRTCs in an order that guarantees
 * the DDB allocations of active pipes never overlap between updates:
 * fastset pipes first (waiting a vblank where their DDB changed), then
 * independent modeset pipes, then dependent (MST slave) pipes; DBuf
 * slices are enabled before and trimmed after as needed.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc *crtc;
        struct intel_crtc_state *old_crtc_state, *new_crtc_state;
        u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
        u8 required_slices = state->wm_results.ddb.enabled_slices;
        struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
        const u8 num_pipes = INTEL_NUM_PIPES(dev_priv);
        u8 update_pipes = 0, modeset_pipes = 0;
        int i;

        /* Classify active pipes: fastset (update) vs full modeset. */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                enum pipe pipe = crtc->pipe;

                if (!new_crtc_state->hw.active)
                        continue;

                /* ignore allocations for crtc's that have been turned off. */
                if (!needs_modeset(new_crtc_state)) {
                        entries[pipe] = old_crtc_state->wm.skl.ddb;
                        update_pipes |= BIT(pipe);
                } else {
                        modeset_pipes |= BIT(pipe);
                }
        }

        /* If 2nd DBuf slice required, enable it here */
        if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
                icl_dbuf_slices_update(dev_priv, required_slices);

        /*
         * Whenever the number of active pipes changes, we need to make sure we
         * update the pipes in the right order so that their ddb allocations
         * never overlap with each other between CRTC updates. Otherwise we'll
         * cause pipe underruns and other bad stuff.
         *
         * So first lets enable all pipes that do not need a fullmodeset as
         * those don't have any external dependency.
         */
        while (update_pipes) {
                for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                                    new_crtc_state, i) {
                        enum pipe pipe = crtc->pipe;

                        if ((update_pipes & BIT(pipe)) == 0)
                                continue;

                        /* Defer pipes whose new DDB still overlaps others. */
                        if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
                                                        entries, num_pipes, pipe))
                                continue;

                        entries[pipe] = new_crtc_state->wm.skl.ddb;
                        update_pipes &= ~BIT(pipe);

                        intel_update_crtc(crtc, state, old_crtc_state,
                                          new_crtc_state);

                        /*
                         * If this is an already active pipe, it's DDB changed,
                         * and this isn't the last pipe that needs updating
                         * then we need to wait for a vblank to pass for the
                         * new ddb allocation to take effect.
                         */
                        if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
                                                 &old_crtc_state->wm.skl.ddb) &&
                            (update_pipes | modeset_pipes))
                                intel_wait_for_vblank(dev_priv, pipe);
                }
        }

        /*
         * Enable all pipes that needs a modeset and do not depends on other
         * pipes
         */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                enum pipe pipe = crtc->pipe;

                if ((modeset_pipes & BIT(pipe)) == 0)
                        continue;

                if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
                    is_trans_port_sync_slave(new_crtc_state))
                        continue;

                WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
                                                    entries, num_pipes, pipe));

                entries[pipe] = new_crtc_state->wm.skl.ddb;
                modeset_pipes &= ~BIT(pipe);

                if (is_trans_port_sync_mode(new_crtc_state)) {
                        struct intel_crtc *slave_crtc;

                        /* Port sync master and slave are enabled together. */
                        intel_update_trans_port_sync_crtcs(crtc, state,
                                                           old_crtc_state,
                                                           new_crtc_state);

                        slave_crtc = intel_get_slave_crtc(new_crtc_state);
                        /* TODO: update entries[] of slave */
                        modeset_pipes &= ~BIT(slave_crtc->pipe);

                } else {
                        intel_update_crtc(crtc, state, old_crtc_state,
                                          new_crtc_state);
                }
        }

        /*
         * Finally enable all pipes that needs a modeset and depends on
         * other pipes, right now it is only MST slaves as both port sync slave
         * and master are enabled together
         */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                enum pipe pipe = crtc->pipe;

                if ((modeset_pipes & BIT(pipe)) == 0)
                        continue;

                WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
                                                    entries, num_pipes, pipe));

                entries[pipe] = new_crtc_state->wm.skl.ddb;
                modeset_pipes &= ~BIT(pipe);

                intel_update_crtc(crtc, state, old_crtc_state, new_crtc_state);
        }

        WARN_ON(modeset_pipes);

        /* If 2nd DBuf slice is no more required disable it */
        if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
                icl_dbuf_slices_update(dev_priv, required_slices);
}
15560
15561 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
15562 {
15563         struct intel_atomic_state *state, *next;
15564         struct llist_node *freed;
15565
15566         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
15567         llist_for_each_entry_safe(state, next, freed, freed)
15568                 drm_atomic_state_put(&state->base);
15569 }
15570
15571 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
15572 {
15573         struct drm_i915_private *dev_priv =
15574                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
15575
15576         intel_atomic_helper_free_state(dev_priv);
15577 }
15578
15579 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
15580 {
15581         struct wait_queue_entry wait_fence, wait_reset;
15582         struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
15583
15584         init_wait_entry(&wait_fence, 0);
15585         init_wait_entry(&wait_reset, 0);
15586         for (;;) {
15587                 prepare_to_wait(&intel_state->commit_ready.wait,
15588                                 &wait_fence, TASK_UNINTERRUPTIBLE);
15589                 prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
15590                                               I915_RESET_MODESET),
15591                                 &wait_reset, TASK_UNINTERRUPTIBLE);
15592
15593
15594                 if (i915_sw_fence_done(&intel_state->commit_ready) ||
15595                     test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
15596                         break;
15597
15598                 schedule();
15599         }
15600         finish_wait(&intel_state->commit_ready.wait, &wait_fence);
15601         finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
15602                                   I915_RESET_MODESET),
15603                     &wait_reset);
15604 }
15605
15606 static void intel_atomic_cleanup_work(struct work_struct *work)
15607 {
15608         struct drm_atomic_state *state =
15609                 container_of(work, struct drm_atomic_state, commit_work);
15610         struct drm_i915_private *i915 = to_i915(state->dev);
15611
15612         drm_atomic_helper_cleanup_planes(&i915->drm, state);
15613         drm_atomic_helper_commit_cleanup_done(state);
15614         drm_atomic_state_put(state);
15615
15616         intel_atomic_helper_free_state(i915);
15617 }
15618
/*
 * Second half of an atomic commit: performs the actual hardware
 * programming for @state. Runs either inline (blocking commits) or from
 * a workqueue (nonblocking commits). The sequence is strictly ordered:
 * disables first, then cdclk/SAGV adjustments, then enables, then
 * post-vblank optimizations and cleanup.
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	/* Wait for fences (and any pending GPU reset) before touching hw. */
	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);

	if (state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	/*
	 * Grab the power domains each modified crtc will need for the
	 * whole update; released again after the verify step below.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (needs_modeset(new_crtc_state) ||
		    new_crtc_state->update_pipe) {

			put_domains[crtc->pipe] =
				modeset_get_crtc_power_domains(new_crtc_state);
		}
	}

	intel_commit_modeset_disables(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		/* Raise cdclk before enabling pipes if the new state needs it. */
		intel_set_cdclk_pre_plane_update(state);

		/*
		 * SKL workaround: bspec recommends we disable the SAGV when we
		 * have more then one pipe enabled
		 */
		if (!intel_can_enable_sagv(state))
			intel_disable_sagv(dev_priv);

		intel_modeset_verify_disabled(dev_priv, state);
	}

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = needs_modeset(new_crtc_state);

		/* Complete events for now disable pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	if (state->modeset)
		intel_encoders_update_prepare(state);

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.commit_modeset_enables(state);

	if (state->modeset) {
		intel_encoders_update_complete(state);

		/* Lower cdclk if the new state allows it; must be post-enable. */
		intel_set_cdclk_post_plane_update(state);
	}

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchrously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need out special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	/* Load LUTs deferred until after vblank (see preload_luts). */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.active &&
		    !needs_modeset(new_crtc_state) &&
		    !new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(state, crtc);
	}

	/* Per-crtc post-update work, power-domain release and state verify. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	if (state->modeset)
		intel_verify_planes(state);

	/* Re-enable SAGV only once the pipe configuration permits it. */
	if (state->modeset && intel_can_enable_sagv(state))
		intel_enable_sagv(dev_priv);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->base.commit_work);
}
15782
15783 static void intel_atomic_commit_work(struct work_struct *work)
15784 {
15785         struct intel_atomic_state *state =
15786                 container_of(work, struct intel_atomic_state, base.commit_work);
15787
15788         intel_atomic_commit_tail(state);
15789 }
15790
15791 static int __i915_sw_fence_call
15792 intel_atomic_commit_ready(struct i915_sw_fence *fence,
15793                           enum i915_sw_fence_notify notify)
15794 {
15795         struct intel_atomic_state *state =
15796                 container_of(fence, struct intel_atomic_state, commit_ready);
15797
15798         switch (notify) {
15799         case FENCE_COMPLETE:
15800                 /* we do blocking waits in the worker, nothing to do here */
15801                 break;
15802         case FENCE_FREE:
15803                 {
15804                         struct intel_atomic_helper *helper =
15805                                 &to_i915(state->base.dev)->atomic_helper;
15806
15807                         if (llist_add(&state->freed, &helper->free_list))
15808                                 schedule_work(&helper->free_work);
15809                         break;
15810                 }
15811         }
15812
15813         return NOTIFY_DONE;
15814 }
15815
15816 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
15817 {
15818         struct intel_plane_state *old_plane_state, *new_plane_state;
15819         struct intel_plane *plane;
15820         int i;
15821
15822         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
15823                                              new_plane_state, i)
15824                 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
15825                                         to_intel_frontbuffer(new_plane_state->hw.fb),
15826                                         plane->frontbuffer_bit);
15827 }
15828
15829 static void assert_global_state_locked(struct drm_i915_private *dev_priv)
15830 {
15831         struct intel_crtc *crtc;
15832
15833         for_each_intel_crtc(&dev_priv->drm, crtc)
15834                 drm_modeset_lock_assert_held(&crtc->base.mutex);
15835 }
15836
/*
 * First half of an atomic commit: validates/prepares the commit, swaps
 * in the new state, then either runs intel_atomic_commit_tail() inline
 * (blocking) or queues it on a workqueue (nonblocking). The state swap
 * ordering relative to drm_atomic_helper_setup_commit() is delicate;
 * do not reorder without consulting the DRM atomic helper docs.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	/* Held until commit_tail finishes (released there on all paths). */
	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		/* Commit the fence so waiters aren't stuck forever. */
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);
	if (!ret)
		intel_atomic_swap_global_state(state);

	if (ret) {
		i915_sw_fence_commit(&state->commit_ready);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	dev_priv->wm.distrust_bios_wm = false;
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	/* Commit device-global state under all crtc locks (asserted). */
	if (state->global_state_changed) {
		assert_global_state_locked(dev_priv);

		dev_priv->active_pipes = state->active_pipes;
	}

	/* Extra reference consumed by the cleanup worker. */
	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/* Blocking modesets must not overtake queued nonblocking ones. */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
15927
/*
 * Bookkeeping for boosting GPU frequency on a missed flip: ties a
 * crtc vblank waitqueue entry to the request being waited upon, so
 * do_rps_boost() can act when the vblank fires.
 */
struct wait_rps_boost {
	struct wait_queue_entry wait;

	struct drm_crtc *crtc;
	struct i915_request *request;
};
15934
/*
 * Vblank waitqueue callback: boost the GPU clocks if the flip's request
 * hasn't even started executing by the time the vblank passed. Releases
 * the references taken in add_rps_boost_after_vblank() and frees itself.
 */
static int do_rps_boost(struct wait_queue_entry *_wait,
			unsigned mode, int sync, void *key)
{
	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
	struct i915_request *rq = wait->request;

	/*
	 * If we missed the vblank, but the request is already running it
	 * is reasonable to assume that it will complete before the next
	 * vblank without our intervention, so leave RPS alone.
	 */
	if (!i915_request_started(rq))
		intel_rps_boost(rq);
	i915_request_put(rq);

	drm_crtc_vblank_put(wait->crtc);

	/* Remove and free ourselves; must be last, we own this memory. */
	list_del(&wait->wait.entry);
	kfree(wait);
	return 1;
}
15956
15957 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
15958                                        struct dma_fence *fence)
15959 {
15960         struct wait_rps_boost *wait;
15961
15962         if (!dma_fence_is_i915(fence))
15963                 return;
15964
15965         if (INTEL_GEN(to_i915(crtc->dev)) < 6)
15966                 return;
15967
15968         if (drm_crtc_vblank_get(crtc))
15969                 return;
15970
15971         wait = kmalloc(sizeof(*wait), GFP_KERNEL);
15972         if (!wait) {
15973                 drm_crtc_vblank_put(crtc);
15974                 return;
15975         }
15976
15977         wait->request = to_request(dma_fence_get(fence));
15978         wait->crtc = crtc;
15979
15980         wait->wait.func = do_rps_boost;
15981         wait->wait.flags = 0;
15982
15983         add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
15984 }
15985
15986 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
15987 {
15988         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
15989         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15990         struct drm_framebuffer *fb = plane_state->hw.fb;
15991         struct i915_vma *vma;
15992
15993         if (plane->id == PLANE_CURSOR &&
15994             INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
15995                 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15996                 const int align = intel_cursor_alignment(dev_priv);
15997                 int err;
15998
15999                 err = i915_gem_object_attach_phys(obj, align);
16000                 if (err)
16001                         return err;
16002         }
16003
16004         vma = intel_pin_and_fence_fb_obj(fb,
16005                                          &plane_state->view,
16006                                          intel_plane_uses_fence(plane_state),
16007                                          &plane_state->flags);
16008         if (IS_ERR(vma))
16009                 return PTR_ERR(vma);
16010
16011         plane_state->vma = vma;
16012
16013         return 0;
16014 }
16015
16016 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
16017 {
16018         struct i915_vma *vma;
16019
16020         vma = fetch_and_zero(&old_plane_state->vma);
16021         if (vma)
16022                 intel_unpin_fb_vma(vma, old_plane_state->flags);
16023 }
16024
16025 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
16026 {
16027         struct i915_sched_attr attr = {
16028                 .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
16029         };
16030
16031         i915_gem_object_wait_priority(obj, 0, &attr);
16032 }
16033
/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @_plane: drm plane to prepare for
 * @_new_plane_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane.  Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits.  Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *_plane,
		       struct drm_plane_state *_new_plane_state)
{
	struct intel_plane *plane = to_intel_plane(_plane);
	struct intel_plane_state *new_plane_state =
		to_intel_plane_state(_new_plane_state);
	struct intel_atomic_state *state =
		to_intel_atomic_state(new_plane_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct intel_plane_state *old_plane_state =
		intel_atomic_get_old_plane_state(state, plane);
	struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
	int ret;

	if (old_obj) {
		const struct intel_crtc_state *crtc_state =
			intel_atomic_get_new_crtc_state(state,
							to_intel_crtc(old_plane_state->hw.crtc));

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * point to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (needs_modeset(crtc_state)) {
			ret = i915_sw_fence_await_reservation(&state->commit_ready,
							      old_obj->base.resv, NULL,
							      false, 0,
							      GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	}

	if (new_plane_state->uapi.fence) { /* explicit fencing */
		ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
						    new_plane_state->uapi.fence,
						    I915_FENCE_TIMEOUT,
						    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	/* Plane is being disabled: nothing to pin. */
	if (!obj)
		return 0;

	/* Keep the pages resident across the pin below. */
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	ret = intel_plane_pin_fb(new_plane_state);

	i915_gem_object_unpin_pages(obj);
	if (ret)
		return ret;

	fb_obj_bump_render_priority(obj);
	i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);

	if (!new_plane_state->uapi.fence) { /* implicit fencing */
		struct dma_fence *fence;

		/* Wait on the object's reservation before flipping. */
		ret = i915_sw_fence_await_reservation(&state->commit_ready,
						      obj->base.resv, NULL,
						      false, I915_FENCE_TIMEOUT,
						      GFP_KERNEL);
		if (ret < 0)
			goto unpin_fb;

		fence = dma_resv_get_excl_rcu(obj->base.resv);
		if (fence) {
			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
						   fence);
			dma_fence_put(fence);
		}
	} else {
		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
					   new_plane_state->uapi.fence);
	}

	/*
	 * We declare pageflips to be interactive and so merit a small bias
	 * towards upclocking to deliver the frame on time. By only changing
	 * the RPS thresholds to sample more regularly and aim for higher
	 * clocks we can hopefully deliver low power workloads (like kodi)
	 * that are not quite steady state without resorting to forcing
	 * maximum clocks following a vblank miss (see do_rps_boost()).
	 */
	if (!state->rps_interactive) {
		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
		state->rps_interactive = true;
	}

	return 0;

unpin_fb:
	intel_plane_unpin_fb(new_plane_state);

	return ret;
}
16154
16155 /**
16156  * intel_cleanup_plane_fb - Cleans up an fb after plane use
16157  * @plane: drm plane to clean up for
16158  * @_old_plane_state: the state from the previous modeset
16159  *
16160  * Cleans up a framebuffer that has just been removed from a plane.
16161  */
16162 void
16163 intel_cleanup_plane_fb(struct drm_plane *plane,
16164                        struct drm_plane_state *_old_plane_state)
16165 {
16166         struct intel_plane_state *old_plane_state =
16167                 to_intel_plane_state(_old_plane_state);
16168         struct intel_atomic_state *state =
16169                 to_intel_atomic_state(old_plane_state->uapi.state);
16170         struct drm_i915_private *dev_priv = to_i915(plane->dev);
16171         struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
16172
16173         if (!obj)
16174                 return;
16175
16176         if (state->rps_interactive) {
16177                 intel_rps_mark_interactive(&dev_priv->gt.rps, false);
16178                 state->rps_interactive = false;
16179         }
16180
16181         /* Should only be called after a successful intel_prepare_plane_fb()! */
16182         intel_plane_unpin_fb(old_plane_state);
16183 }
16184
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);

	/* Tear down the drm core side first, then free our wrapper. */
	drm_plane_cleanup(plane);
	kfree(intel_plane);
}
16197
16198 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
16199                                             u32 format, u64 modifier)
16200 {
16201         switch (modifier) {
16202         case DRM_FORMAT_MOD_LINEAR:
16203         case I915_FORMAT_MOD_X_TILED:
16204                 break;
16205         default:
16206                 return false;
16207         }
16208
16209         switch (format) {
16210         case DRM_FORMAT_C8:
16211         case DRM_FORMAT_RGB565:
16212         case DRM_FORMAT_XRGB1555:
16213         case DRM_FORMAT_XRGB8888:
16214                 return modifier == DRM_FORMAT_MOD_LINEAR ||
16215                         modifier == I915_FORMAT_MOD_X_TILED;
16216         default:
16217                 return false;
16218         }
16219 }
16220
16221 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
16222                                             u32 format, u64 modifier)
16223 {
16224         switch (modifier) {
16225         case DRM_FORMAT_MOD_LINEAR:
16226         case I915_FORMAT_MOD_X_TILED:
16227                 break;
16228         default:
16229                 return false;
16230         }
16231
16232         switch (format) {
16233         case DRM_FORMAT_C8:
16234         case DRM_FORMAT_RGB565:
16235         case DRM_FORMAT_XRGB8888:
16236         case DRM_FORMAT_XBGR8888:
16237         case DRM_FORMAT_ARGB8888:
16238         case DRM_FORMAT_ABGR8888:
16239         case DRM_FORMAT_XRGB2101010:
16240         case DRM_FORMAT_XBGR2101010:
16241         case DRM_FORMAT_ARGB2101010:
16242         case DRM_FORMAT_ABGR2101010:
16243         case DRM_FORMAT_XBGR16161616F:
16244                 return modifier == DRM_FORMAT_MOD_LINEAR ||
16245                         modifier == I915_FORMAT_MOD_X_TILED;
16246         default:
16247                 return false;
16248         }
16249 }
16250
16251 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
16252                                               u32 format, u64 modifier)
16253 {
16254         return modifier == DRM_FORMAT_MOD_LINEAR &&
16255                 format == DRM_FORMAT_ARGB8888;
16256 }
16257
/* drm_plane_funcs vtable for gen4+ primary planes. */
static const struct drm_plane_funcs i965_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i965_plane_format_mod_supported,
};
16266
/* drm_plane_funcs vtable for gen2/3 primary planes. */
static const struct drm_plane_funcs i8xx_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i8xx_plane_format_mod_supported,
};
16275
/*
 * Fast path for legacy cursor ioctls: updates the cursor plane without
 * a full atomic commit (no vblank wait) when only the fb or position
 * changed. Anything that could affect watermarks or that races with a
 * pending commit falls back to the regular atomic slowpath.
 */
static int
intel_legacy_cursor_update(struct drm_plane *_plane,
			   struct drm_crtc *_crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   u32 src_x, u32 src_y,
			   u32 src_w, u32 src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_plane *plane = to_intel_plane(_plane);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->base.state);
	struct intel_plane_state *new_plane_state;
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_crtc_state *new_crtc_state;
	int ret;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
	    crtc_state->update_pipe)
		goto slow;

	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane.  This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->uapi.commit &&
	    !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->uapi.crtc != &crtc->base ||
	    old_plane_state->uapi.src_w != src_w ||
	    old_plane_state->uapi.src_h != src_h ||
	    old_plane_state->uapi.crtc_w != crtc_w ||
	    old_plane_state->uapi.crtc_h != crtc_h ||
	    !old_plane_state->uapi.fb != !fb)
		goto slow;

	/* Temporary states for the check below; freed in out_free. */
	new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
	if (!new_plane_state)
		return -ENOMEM;

	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
	if (!new_crtc_state) {
		ret = -ENOMEM;
		goto out_free;
	}

	drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);

	new_plane_state->uapi.src_x = src_x;
	new_plane_state->uapi.src_y = src_y;
	new_plane_state->uapi.src_w = src_w;
	new_plane_state->uapi.src_h = src_h;
	new_plane_state->uapi.crtc_x = crtc_x;
	new_plane_state->uapi.crtc_y = crtc_y;
	new_plane_state->uapi.crtc_w = crtc_w;
	new_plane_state->uapi.crtc_h = crtc_h;

	intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);

	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
						  old_plane_state, new_plane_state);
	if (ret)
		goto out_free;

	ret = intel_plane_pin_fb(new_plane_state);
	if (ret)
		goto out_free;

	intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
				ORIGIN_FLIP);
	intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
				to_intel_frontbuffer(new_plane_state->hw.fb),
				plane->frontbuffer_bit);

	/* Swap plane state */
	plane->base.state = &new_plane_state->uapi;

	/*
	 * We cannot swap crtc_state as it may be in use by an atomic commit or
	 * page flip that's running simultaneously. If we swap crtc_state and
	 * destroy the old state, we will cause a use-after-free there.
	 *
	 * Only update active_planes, which is needed for our internal
	 * bookkeeping. Either value will do the right thing when updating
	 * planes atomically. If the cursor was part of the atomic update then
	 * we would have taken the slowpath.
	 */
	crtc_state->active_planes = new_crtc_state->active_planes;

	if (new_plane_state->uapi.visible)
		intel_update_plane(plane, crtc_state, new_plane_state);
	else
		intel_disable_plane(plane, crtc_state);

	intel_plane_unpin_fb(old_plane_state);

out_free:
	/* On success the old plane state is freed, on error the new one. */
	if (new_crtc_state)
		intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
	if (ret)
		intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
	else
		intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
	return ret;

slow:
	return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
16400
/*
 * vfuncs for the legacy cursor plane. .update_plane takes the custom
 * fastpath (intel_legacy_cursor_update) to avoid a full atomic commit
 * for simple cursor moves; everything else uses the generic atomic
 * helpers / common intel plane hooks.
 */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};
16409
16410 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
16411                                enum i9xx_plane_id i9xx_plane)
16412 {
16413         if (!HAS_FBC(dev_priv))
16414                 return false;
16415
16416         if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
16417                 return i9xx_plane == PLANE_A; /* tied to pipe A */
16418         else if (IS_IVYBRIDGE(dev_priv))
16419                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
16420                         i9xx_plane == PLANE_C;
16421         else if (INTEL_GEN(dev_priv) >= 4)
16422                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
16423         else
16424                 return i9xx_plane == PLANE_A;
16425 }
16426
/*
 * Create and register the primary plane for @pipe.
 *
 * Gen9+ takes the skl universal plane path; everything older gets an
 * i9xx-style primary plane with per-platform format lists, plane
 * vfuncs and min-cdclk hooks.  Returns the plane or an ERR_PTR() on
 * failure.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *plane;
	const struct drm_plane_funcs *plane_funcs;
	unsigned int supported_rotations;
	unsigned int possible_crtcs;
	const u32 *formats;
	int num_formats;
	int ret, zpos;

	if (INTEL_GEN(dev_priv) >= 9)
		return skl_universal_plane_create(dev_priv, pipe,
						  PLANE_PRIMARY);

	plane = intel_plane_alloc();
	if (IS_ERR(plane))
		return plane;

	plane->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
	else
		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
	plane->id = PLANE_PRIMARY;
	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);

	/* Let FBC know which frontbuffer bits it may track. */
	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
	if (plane->has_fbc) {
		struct intel_fbc *fbc = &dev_priv->fbc;

		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
	}

	/* Pick the pixel format list supported on this platform. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		formats = vlv_primary_formats;
		num_formats = ARRAY_SIZE(vlv_primary_formats);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		/*
		 * WaFP16GammaEnabling:ivb
		 * "Workaround : When using the 64-bit format, the plane
		 *  output on each color channel has one quarter amplitude.
		 *  It can be brought up to full amplitude by using pipe
		 *  gamma correction or pipe color space conversion to
		 *  multiply the plane output by four."
		 *
		 * There is no dedicated plane gamma for the primary plane,
		 * and using the pipe gamma/csc could conflict with other
		 * planes, so we choose not to expose fp16 on IVB primary
		 * planes. HSW primary planes no longer have this problem.
		 */
		if (IS_IVYBRIDGE(dev_priv)) {
			formats = ivb_primary_formats;
			num_formats = ARRAY_SIZE(ivb_primary_formats);
		} else {
			formats = i965_primary_formats;
			num_formats = ARRAY_SIZE(i965_primary_formats);
		}
	} else {
		formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
	}

	if (INTEL_GEN(dev_priv) >= 4)
		plane_funcs = &i965_plane_funcs;
	else
		plane_funcs = &i8xx_plane_funcs;

	/* Per-platform hook computing the minimum cdclk the plane needs. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		plane->min_cdclk = vlv_plane_min_cdclk;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		plane->min_cdclk = hsw_plane_min_cdclk;
	else if (IS_IVYBRIDGE(dev_priv))
		plane->min_cdclk = ivb_plane_min_cdclk;
	else
		plane->min_cdclk = i9xx_plane_min_cdclk;

	plane->max_stride = i9xx_plane_max_stride;
	plane->update_plane = i9xx_update_plane;
	plane->disable_plane = i9xx_disable_plane;
	plane->get_hw_state = i9xx_plane_get_hw_state;
	plane->check_plane = i9xx_plane_check;

	possible_crtcs = BIT(pipe);

	/*
	 * Gen5+/G4X name the plane after the pipe; older platforms name
	 * it after the hw plane id (which may differ from the pipe, see
	 * the FBC swap above).
	 */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       possible_crtcs, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       possible_crtcs, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c",
					       plane_name(plane->i9xx_plane));
	if (ret)
		goto fail;

	/* CHV pipe B primary additionally supports horizontal reflection. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
	} else {
		supported_rotations = DRM_MODE_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&plane->base,
						   DRM_MODE_ROTATE_0,
						   supported_rotations);

	/* Primary plane gets a fixed zpos of 0. */
	zpos = 0;
	drm_plane_create_zpos_immutable_property(&plane->base, zpos);

	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);

	return plane;

fail:
	intel_plane_free(plane);

	return ERR_PTR(ret);
}
16562
/*
 * Create and register the cursor plane for @pipe.
 *
 * i845/i865 have their own cursor code paths; everything else uses
 * the i9xx cursor routines.  Returns the plane or an ERR_PTR() on
 * failure.
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	unsigned int possible_crtcs;
	struct intel_plane *cursor;
	int ret, zpos;

	cursor = intel_plane_alloc();
	if (IS_ERR(cursor))
		return cursor;

	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->max_stride = i845_cursor_max_stride;
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->max_stride = i9xx_cursor_max_stride;
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	/*
	 * ~0 marks the cached cursor register values as unknown --
	 * presumably so the first update writes the real registers
	 * unconditionally; confirm against the update/disable paths.
	 */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	possible_crtcs = BIT(pipe);

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       possible_crtcs, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	/* Cursor sits above all sprite planes in the fixed zpos stack. */
	zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
	drm_plane_create_zpos_immutable_property(&cursor->base, zpos);

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	intel_plane_free(cursor);

	return ERR_PTR(ret);
}
16630
/*
 * drm_crtc_funcs entries common to all platforms; the per-platform
 * tables below add the appropriate vblank counter/enable/disable
 * hooks on top of these.
 */
#define INTEL_CRTC_FUNCS \
	.gamma_set = drm_atomic_helper_legacy_gamma_set, \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources
16641
/* Non-GMCH gen8+: g4x hw frame counter, bdw vblank irq hooks. */
static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
};

/* Non-GMCH pre-gen8 (ILK..HSW). */
static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
};

/* GMCH: G4X/VLV/CHV. */
static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
};

/* GMCH: other gen4 platforms. */
static const struct drm_crtc_funcs i965_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
};

/* GMCH: i915gm/i945gm, which need their own vblank enable/disable. */
static const struct drm_crtc_funcs i915gm_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i915gm_enable_vblank,
	.disable_vblank = i915gm_disable_vblank,
};

/* GMCH: remaining gen3 platforms. */
static const struct drm_crtc_funcs i915_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
};

/* GMCH: gen2. */
static const struct drm_crtc_funcs i8xx_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	/* no hw vblank counter */
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
};
16697
16698 static struct intel_crtc *intel_crtc_alloc(void)
16699 {
16700         struct intel_crtc_state *crtc_state;
16701         struct intel_crtc *crtc;
16702
16703         crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
16704         if (!crtc)
16705                 return ERR_PTR(-ENOMEM);
16706
16707         crtc_state = intel_crtc_state_alloc(crtc);
16708         if (!crtc_state) {
16709                 kfree(crtc);
16710                 return ERR_PTR(-ENOMEM);
16711         }
16712
16713         crtc->base.state = &crtc_state->uapi;
16714         crtc->config = crtc_state;
16715
16716         return crtc;
16717 }
16718
/* Free a CRTC allocated by intel_crtc_alloc(), including its state. */
static void intel_crtc_free(struct intel_crtc *crtc)
{
	intel_crtc_destroy_state(&crtc->base, crtc->base.state);
	kfree(crtc);
}
16724
/*
 * Allocate and register the CRTC for @pipe along with its primary,
 * sprite and cursor planes, and record the pipe/plane -> crtc
 * mappings.  Returns 0 on success or a negative error code.
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary, *cursor;
	const struct drm_crtc_funcs *funcs;
	struct intel_crtc *crtc;
	int sprite, ret;

	crtc = intel_crtc_alloc();
	if (IS_ERR(crtc))
		return PTR_ERR(crtc);

	crtc->pipe = pipe;
	crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(cursor->id);

	/* Pick the vblank hooks matching this platform's display engine. */
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv) ||
		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
			funcs = &g4x_crtc_funcs;
		else if (IS_GEN(dev_priv, 4))
			funcs = &i965_crtc_funcs;
		else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
			funcs = &i915gm_crtc_funcs;
		else if (IS_GEN(dev_priv, 3))
			funcs = &i915_crtc_funcs;
		else
			funcs = &i8xx_crtc_funcs;
	} else {
		if (INTEL_GEN(dev_priv) >= 8)
			funcs = &bdw_crtc_funcs;
		else
			funcs = &ilk_crtc_funcs;
	}

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
					&primary->base, &cursor->base,
					funcs, "pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/* Each pipe must map to exactly one crtc. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = crtc;

	/* Pre-gen9 also tracks which crtc owns each i9xx primary plane. */
	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
	}

	intel_color_init(crtc);

	/* Sanity check: the drm crtc index is expected to match the pipe. */
	WARN_ON(drm_crtc_index(&crtc->base) != crtc->pipe);

	return 0;

fail:
	intel_crtc_free(crtc);

	return ret;
}
16812
16813 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
16814                                       struct drm_file *file)
16815 {
16816         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
16817         struct drm_crtc *drmmode_crtc;
16818         struct intel_crtc *crtc;
16819
16820         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
16821         if (!drmmode_crtc)
16822                 return -ENOENT;
16823
16824         crtc = to_intel_crtc(drmmode_crtc);
16825         pipe_from_crtc_id->pipe = crtc->pipe;
16826
16827         return 0;
16828 }
16829
16830 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
16831 {
16832         struct drm_device *dev = encoder->base.dev;
16833         struct intel_encoder *source_encoder;
16834         u32 possible_clones = 0;
16835
16836         for_each_intel_encoder(dev, source_encoder) {
16837                 if (encoders_cloneable(encoder, source_encoder))
16838                         possible_clones |= drm_encoder_mask(&source_encoder->base);
16839         }
16840
16841         return possible_clones;
16842 }
16843
16844 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
16845 {
16846         struct drm_device *dev = encoder->base.dev;
16847         struct intel_crtc *crtc;
16848         u32 possible_crtcs = 0;
16849
16850         for_each_intel_crtc(dev, crtc) {
16851                 if (encoder->pipe_mask & BIT(crtc->pipe))
16852                         possible_crtcs |= drm_crtc_mask(&crtc->base);
16853         }
16854
16855         return possible_crtcs;
16856 }
16857
16858 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
16859 {
16860         if (!IS_MOBILE(dev_priv))
16861                 return false;
16862
16863         if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
16864                 return false;
16865
16866         if (IS_GEN(dev_priv, 5) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
16867                 return false;
16868
16869         return true;
16870 }
16871
/*
 * Report whether the integrated CRT output is present and usable on a
 * DDI platform, based on SKU, fuse straps and VBT.
 */
static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
{
	/* No integrated CRT on gen9+. */
	if (INTEL_GEN(dev_priv) >= 9)
		return false;

	/* ULT SKUs don't have CRT. */
	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		return false;

	/* On LPT-H the fuse strap can disable CRT. */
	if (HAS_PCH_LPT_H(dev_priv) &&
	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	/* Finally the VBT has to declare integrated CRT support. */
	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}
16893
16894 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
16895 {
16896         int pps_num;
16897         int pps_idx;
16898
16899         if (HAS_DDI(dev_priv))
16900                 return;
16901         /*
16902          * This w/a is needed at least on CPT/PPT, but to be sure apply it
16903          * everywhere where registers can be write protected.
16904          */
16905         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
16906                 pps_num = 2;
16907         else
16908                 pps_num = 1;
16909
16910         for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
16911                 u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
16912
16913                 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
16914                 intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
16915         }
16916 }
16917
/*
 * Determine the MMIO base of the panel power sequencer registers for
 * this platform and apply the PPS register-unlock workaround.
 */
static void intel_pps_init(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
		dev_priv->pps_mmio_base = PCH_PPS_BASE;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->pps_mmio_base = VLV_PPS_BASE;
	else
		dev_priv->pps_mmio_base = PPS_BASE;

	intel_pps_unlock_regs_wa(dev_priv);
}
16929
16930 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
16931 {
16932         struct intel_encoder *encoder;
16933         bool dpd_is_edp = false;
16934
16935         intel_pps_init(dev_priv);
16936
16937         if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
16938                 return;
16939
16940         if (INTEL_GEN(dev_priv) >= 12) {
16941                 intel_ddi_init(dev_priv, PORT_A);
16942                 intel_ddi_init(dev_priv, PORT_B);
16943                 intel_ddi_init(dev_priv, PORT_D);
16944                 intel_ddi_init(dev_priv, PORT_E);
16945                 intel_ddi_init(dev_priv, PORT_F);
16946                 intel_ddi_init(dev_priv, PORT_G);
16947                 intel_ddi_init(dev_priv, PORT_H);
16948                 intel_ddi_init(dev_priv, PORT_I);
16949                 icl_dsi_init(dev_priv);
16950         } else if (IS_ELKHARTLAKE(dev_priv)) {
16951                 intel_ddi_init(dev_priv, PORT_A);
16952                 intel_ddi_init(dev_priv, PORT_B);
16953                 intel_ddi_init(dev_priv, PORT_C);
16954                 intel_ddi_init(dev_priv, PORT_D);
16955                 icl_dsi_init(dev_priv);
16956         } else if (IS_GEN(dev_priv, 11)) {
16957                 intel_ddi_init(dev_priv, PORT_A);
16958                 intel_ddi_init(dev_priv, PORT_B);
16959                 intel_ddi_init(dev_priv, PORT_C);
16960                 intel_ddi_init(dev_priv, PORT_D);
16961                 intel_ddi_init(dev_priv, PORT_E);
16962                 /*
16963                  * On some ICL SKUs port F is not present. No strap bits for
16964                  * this, so rely on VBT.
16965                  * Work around broken VBTs on SKUs known to have no port F.
16966                  */
16967                 if (IS_ICL_WITH_PORT_F(dev_priv) &&
16968                     intel_bios_is_port_present(dev_priv, PORT_F))
16969                         intel_ddi_init(dev_priv, PORT_F);
16970
16971                 icl_dsi_init(dev_priv);
16972         } else if (IS_GEN9_LP(dev_priv)) {
16973                 /*
16974                  * FIXME: Broxton doesn't support port detection via the
16975                  * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
16976                  * detect the ports.
16977                  */
16978                 intel_ddi_init(dev_priv, PORT_A);
16979                 intel_ddi_init(dev_priv, PORT_B);
16980                 intel_ddi_init(dev_priv, PORT_C);
16981
16982                 vlv_dsi_init(dev_priv);
16983         } else if (HAS_DDI(dev_priv)) {
16984                 int found;
16985
16986                 if (intel_ddi_crt_present(dev_priv))
16987                         intel_crt_init(dev_priv);
16988
16989                 /*
16990                  * Haswell uses DDI functions to detect digital outputs.
16991                  * On SKL pre-D0 the strap isn't connected, so we assume
16992                  * it's there.
16993                  */
16994                 found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
16995                 /* WaIgnoreDDIAStrap: skl */
16996                 if (found || IS_GEN9_BC(dev_priv))
16997                         intel_ddi_init(dev_priv, PORT_A);
16998
16999                 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
17000                  * register */
17001                 found = intel_de_read(dev_priv, SFUSE_STRAP);
17002
17003                 if (found & SFUSE_STRAP_DDIB_DETECTED)
17004                         intel_ddi_init(dev_priv, PORT_B);
17005                 if (found & SFUSE_STRAP_DDIC_DETECTED)
17006                         intel_ddi_init(dev_priv, PORT_C);
17007                 if (found & SFUSE_STRAP_DDID_DETECTED)
17008                         intel_ddi_init(dev_priv, PORT_D);
17009                 if (found & SFUSE_STRAP_DDIF_DETECTED)
17010                         intel_ddi_init(dev_priv, PORT_F);
17011                 /*
17012                  * On SKL we don't have a way to detect DDI-E so we rely on VBT.
17013                  */
17014                 if (IS_GEN9_BC(dev_priv) &&
17015                     intel_bios_is_port_present(dev_priv, PORT_E))
17016                         intel_ddi_init(dev_priv, PORT_E);
17017
17018         } else if (HAS_PCH_SPLIT(dev_priv)) {
17019                 int found;
17020
17021                 /*
17022                  * intel_edp_init_connector() depends on this completing first,
17023                  * to prevent the registration of both eDP and LVDS and the
17024                  * incorrect sharing of the PPS.
17025                  */
17026                 intel_lvds_init(dev_priv);
17027                 intel_crt_init(dev_priv);
17028
17029                 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
17030
17031                 if (ilk_has_edp_a(dev_priv))
17032                         intel_dp_init(dev_priv, DP_A, PORT_A);
17033
17034                 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
17035                         /* PCH SDVOB multiplex with HDMIB */
17036                         found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
17037                         if (!found)
17038                                 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
17039                         if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
17040                                 intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
17041                 }
17042
17043                 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
17044                         intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
17045
17046                 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
17047                         intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
17048
17049                 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
17050                         intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
17051
17052                 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
17053                         intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
17054         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
17055                 bool has_edp, has_port;
17056
17057                 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
17058                         intel_crt_init(dev_priv);
17059
17060                 /*
17061                  * The DP_DETECTED bit is the latched state of the DDC
17062                  * SDA pin at boot. However since eDP doesn't require DDC
17063                  * (no way to plug in a DP->HDMI dongle) the DDC pins for
17064                  * eDP ports may have been muxed to an alternate function.
17065                  * Thus we can't rely on the DP_DETECTED bit alone to detect
17066                  * eDP ports. Consult the VBT as well as DP_DETECTED to
17067                  * detect eDP ports.
17068                  *
17069                  * Sadly the straps seem to be missing sometimes even for HDMI
17070                  * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
17071                  * and VBT for the presence of the port. Additionally we can't
17072                  * trust the port type the VBT declares as we've seen at least
17073                  * HDMI ports that the VBT claim are DP or eDP.
17074                  */
17075                 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
17076                 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
17077                 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
17078                         has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
17079                 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
17080                         intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
17081
17082                 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
17083                 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
17084                 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
17085                         has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
17086                 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
17087                         intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
17088
17089                 if (IS_CHERRYVIEW(dev_priv)) {
17090                         /*
17091                          * eDP not supported on port D,
17092                          * so no need to worry about it
17093                          */
17094                         has_port = intel_bios_is_port_present(dev_priv, PORT_D);
17095                         if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
17096                                 intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
17097                         if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
17098                                 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
17099                 }
17100
17101                 vlv_dsi_init(dev_priv);
17102         } else if (IS_PINEVIEW(dev_priv)) {
17103                 intel_lvds_init(dev_priv);
17104                 intel_crt_init(dev_priv);
17105         } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
17106                 bool found = false;
17107
17108                 if (IS_MOBILE(dev_priv))
17109                         intel_lvds_init(dev_priv);
17110
17111                 intel_crt_init(dev_priv);
17112
17113                 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
17114                         drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
17115                         found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
17116                         if (!found && IS_G4X(dev_priv)) {
17117                                 drm_dbg_kms(&dev_priv->drm,
17118                                             "probing HDMI on SDVOB\n");
17119                                 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
17120                         }
17121
17122                         if (!found && IS_G4X(dev_priv))
17123                                 intel_dp_init(dev_priv, DP_B, PORT_B);
17124                 }
17125
17126                 /* Before G4X SDVOC doesn't have its own detect register */
17127
17128                 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
17129                         drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
17130                         found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
17131                 }
17132
17133                 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
17134
17135                         if (IS_G4X(dev_priv)) {
17136                                 drm_dbg_kms(&dev_priv->drm,
17137                                             "probing HDMI on SDVOC\n");
17138                                 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
17139                         }
17140                         if (IS_G4X(dev_priv))
17141                                 intel_dp_init(dev_priv, DP_C, PORT_C);
17142                 }
17143
17144                 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
17145                         intel_dp_init(dev_priv, DP_D, PORT_D);
17146
17147                 if (SUPPORTS_TV(dev_priv))
17148                         intel_tv_init(dev_priv);
17149         } else if (IS_GEN(dev_priv, 2)) {
17150                 if (IS_I85X(dev_priv))
17151                         intel_lvds_init(dev_priv);
17152
17153                 intel_crt_init(dev_priv);
17154                 intel_dvo_init(dev_priv);
17155         }
17156
17157         intel_psr_init(dev_priv);
17158
17159         for_each_intel_encoder(&dev_priv->drm, encoder) {
17160                 encoder->base.possible_crtcs =
17161                         intel_encoder_possible_crtcs(encoder);
17162                 encoder->base.possible_clones =
17163                         intel_encoder_possible_clones(encoder);
17164         }
17165
17166         intel_init_pch_refclk(dev_priv);
17167
17168         drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
17169 }
17170
17171 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
17172 {
17173         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
17174
17175         drm_framebuffer_cleanup(fb);
17176         intel_frontbuffer_put(intel_fb->frontbuffer);
17177
17178         kfree(intel_fb);
17179 }
17180
17181 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
17182                                                 struct drm_file *file,
17183                                                 unsigned int *handle)
17184 {
17185         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
17186         struct drm_i915_private *i915 = to_i915(obj->base.dev);
17187
17188         if (obj->userptr.mm) {
17189                 drm_dbg(&i915->drm,
17190                         "attempting to use a userptr for a framebuffer, denied\n");
17191                 return -EINVAL;
17192         }
17193
17194         return drm_gem_handle_create(file, &obj->base, handle);
17195 }
17196
17197 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
17198                                         struct drm_file *file,
17199                                         unsigned flags, unsigned color,
17200                                         struct drm_clip_rect *clips,
17201                                         unsigned num_clips)
17202 {
17203         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
17204
17205         i915_gem_object_flush_if_display(obj);
17206         intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
17207
17208         return 0;
17209 }
17210
/* Framebuffer vtable hooked up by intel_framebuffer_init(). */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
17216
/*
 * Validate @mode_cmd against hardware/driver limits and initialize
 * @intel_fb around @obj. A frontbuffer tracking reference is taken up
 * front and dropped again on every failure path (the err label).
 * Returns 0 on success or a negative errno.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 max_stride;
	unsigned int tiling, stride;
	int ret = -EINVAL;
	int i;

	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
	if (!intel_fb->frontbuffer)
		return -ENOMEM;

	/* Snapshot the object's fence tiling mode/stride under its lock. */
	i915_gem_object_lock(obj);
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			drm_dbg_kms(&dev_priv->drm,
				    "tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		/* Legacy addfb: derive the modifier from the fence tiling. */
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			drm_dbg_kms(&dev_priv->drm,
				    "No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	/* Reject format+modifier combos no plane on this device supports. */
	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		struct drm_format_name_buf format_name;

		drm_dbg_kms(&dev_priv->drm,
			    "unsupported pixel format %s / modifier 0x%llx\n",
			    drm_get_format_name(mode_cmd->pixel_format,
						&format_name),
			    mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		drm_dbg_kms(&dev_priv->drm,
			    "tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
					 mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > max_stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "%s pitch (%u) must be at most %d\n",
			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			    "tiled" : "linear",
			    mode_cmd->pitches[0], max_stride);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "pitch (%d) must match tiling stride (%d)\n",
			    mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "plane 0 offset (0x%08x) must be 0\n",
			    mode_cmd->offsets[0]);
		goto err;
	}

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	/* Per-plane checks: one shared object, aligned pitches. */
	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
				    i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);
		if (fb->pitches[i] & (stride_alignment - 1)) {
			drm_dbg_kms(&dev_priv->drm,
				    "plane %d pitch (%d) must be at least %u byte aligned\n",
				    i, fb->pitches[i], stride_alignment);
			goto err;
		}

		/* gen12 CCS aux planes have a fixed, derived pitch. */
		if (is_gen12_ccs_plane(fb, i)) {
			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);

			if (fb->pitches[i] != ccs_aux_stride) {
				drm_dbg_kms(&dev_priv->drm,
					    "ccs aux plane %d pitch (%d) must be %d\n",
					    i,
					    fb->pitches[i], ccs_aux_stride);
				goto err;
			}
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	intel_frontbuffer_put(intel_fb->frontbuffer);
	return ret;
}
17362
17363 static struct drm_framebuffer *
17364 intel_user_framebuffer_create(struct drm_device *dev,
17365                               struct drm_file *filp,
17366                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
17367 {
17368         struct drm_framebuffer *fb;
17369         struct drm_i915_gem_object *obj;
17370         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
17371
17372         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
17373         if (!obj)
17374                 return ERR_PTR(-ENOENT);
17375
17376         fb = intel_framebuffer_create(obj, &mode_cmd);
17377         i915_gem_object_put(obj);
17378
17379         return fb;
17380 }
17381
17382 static enum drm_mode_status
17383 intel_mode_valid(struct drm_device *dev,
17384                  const struct drm_display_mode *mode)
17385 {
17386         struct drm_i915_private *dev_priv = to_i915(dev);
17387         int hdisplay_max, htotal_max;
17388         int vdisplay_max, vtotal_max;
17389
17390         /*
17391          * Can't reject DBLSCAN here because Xorg ddxen can add piles
17392          * of DBLSCAN modes to the output's mode list when they detect
17393          * the scaling mode property on the connector. And they don't
17394          * ask the kernel to validate those modes in any way until
17395          * modeset time at which point the client gets a protocol error.
17396          * So in order to not upset those clients we silently ignore the
17397          * DBLSCAN flag on such connectors. For other connectors we will
17398          * reject modes with the DBLSCAN flag in encoder->compute_config().
17399          * And we always reject DBLSCAN modes in connector->mode_valid()
17400          * as we never want such modes on the connector's mode list.
17401          */
17402
17403         if (mode->vscan > 1)
17404                 return MODE_NO_VSCAN;
17405
17406         if (mode->flags & DRM_MODE_FLAG_HSKEW)
17407                 return MODE_H_ILLEGAL;
17408
17409         if (mode->flags & (DRM_MODE_FLAG_CSYNC |
17410                            DRM_MODE_FLAG_NCSYNC |
17411                            DRM_MODE_FLAG_PCSYNC))
17412                 return MODE_HSYNC;
17413
17414         if (mode->flags & (DRM_MODE_FLAG_BCAST |
17415                            DRM_MODE_FLAG_PIXMUX |
17416                            DRM_MODE_FLAG_CLKDIV2))
17417                 return MODE_BAD;
17418
17419         /* Transcoder timing limits */
17420         if (INTEL_GEN(dev_priv) >= 11) {
17421                 hdisplay_max = 16384;
17422                 vdisplay_max = 8192;
17423                 htotal_max = 16384;
17424                 vtotal_max = 8192;
17425         } else if (INTEL_GEN(dev_priv) >= 9 ||
17426                    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
17427                 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
17428                 vdisplay_max = 4096;
17429                 htotal_max = 8192;
17430                 vtotal_max = 8192;
17431         } else if (INTEL_GEN(dev_priv) >= 3) {
17432                 hdisplay_max = 4096;
17433                 vdisplay_max = 4096;
17434                 htotal_max = 8192;
17435                 vtotal_max = 8192;
17436         } else {
17437                 hdisplay_max = 2048;
17438                 vdisplay_max = 2048;
17439                 htotal_max = 4096;
17440                 vtotal_max = 4096;
17441         }
17442
17443         if (mode->hdisplay > hdisplay_max ||
17444             mode->hsync_start > htotal_max ||
17445             mode->hsync_end > htotal_max ||
17446             mode->htotal > htotal_max)
17447                 return MODE_H_ILLEGAL;
17448
17449         if (mode->vdisplay > vdisplay_max ||
17450             mode->vsync_start > vtotal_max ||
17451             mode->vsync_end > vtotal_max ||
17452             mode->vtotal > vtotal_max)
17453                 return MODE_V_ILLEGAL;
17454
17455         if (INTEL_GEN(dev_priv) >= 5) {
17456                 if (mode->hdisplay < 64 ||
17457                     mode->htotal - mode->hdisplay < 32)
17458                         return MODE_H_ILLEGAL;
17459
17460                 if (mode->vtotal - mode->vdisplay < 5)
17461                         return MODE_V_ILLEGAL;
17462         } else {
17463                 if (mode->htotal - mode->hdisplay < 32)
17464                         return MODE_H_ILLEGAL;
17465
17466                 if (mode->vtotal - mode->vdisplay < 3)
17467                         return MODE_V_ILLEGAL;
17468         }
17469
17470         return MODE_OK;
17471 }
17472
17473 enum drm_mode_status
17474 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
17475                                 const struct drm_display_mode *mode)
17476 {
17477         int plane_width_max, plane_height_max;
17478
17479         /*
17480          * intel_mode_valid() should be
17481          * sufficient on older platforms.
17482          */
17483         if (INTEL_GEN(dev_priv) < 9)
17484                 return MODE_OK;
17485
17486         /*
17487          * Most people will probably want a fullscreen
17488          * plane so let's not advertize modes that are
17489          * too big for that.
17490          */
17491         if (INTEL_GEN(dev_priv) >= 11) {
17492                 plane_width_max = 5120;
17493                 plane_height_max = 4320;
17494         } else {
17495                 plane_width_max = 5120;
17496                 plane_height_max = 4096;
17497         }
17498
17499         if (mode->hdisplay > plane_width_max)
17500                 return MODE_H_ILLEGAL;
17501
17502         if (mode->vdisplay > plane_height_max)
17503                 return MODE_V_ILLEGAL;
17504
17505         return MODE_OK;
17506 }
17507
/* Driver-wide mode config hooks installed by intel_mode_config_init(). */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
17519
17520 /**
17521  * intel_init_display_hooks - initialize the display modesetting hooks
17522  * @dev_priv: device private
17523  */
/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 *
 * Selects per-platform implementations for pipe config readout, initial
 * plane config takeover, clock computation and crtc enable/disable, then
 * the FDI link training and modeset-enable commit hooks. Branch order
 * matters: more capable/newer platforms are matched first.
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	intel_init_cdclk_hooks(dev_priv);

	if (INTEL_GEN(dev_priv) >= 9) {
		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skl_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
		dev_priv->display.crtc_enable = hsw_crtc_enable;
		dev_priv->display.crtc_disable = hsw_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		/* HSW/BDW: DDI but pre-skl universal planes. */
		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			hsw_crtc_compute_clock;
		dev_priv->display.crtc_enable = hsw_crtc_enable;
		dev_priv->display.crtc_disable = hsw_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* ILK/SNB/IVB. */
		dev_priv->display.get_pipe_config = ilk_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ilk_crtc_compute_clock;
		dev_priv->display.crtc_enable = ilk_crtc_enable;
		dev_priv->display.crtc_disable = ilk_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN(dev_priv, 2)) {
		/* Remaining gen3/gen4 variants. */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		/* gen2. */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	/* FDI link training is only needed on ILK/SNB/IVB. */
	if (IS_GEN(dev_priv, 5)) {
		dev_priv->display.fdi_link_train = ilk_fdi_link_train;
	} else if (IS_GEN(dev_priv, 6)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
	else
		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;

}
17610
/*
 * Read back the current cdclk hardware state and seed the atomic cdclk
 * global state (both logical and actual) from it, so the first commit
 * starts from what the hardware/BIOS left programmed.
 */
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(i915->cdclk.obj.state);

	intel_update_cdclk(i915);
	intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
}
17620
17621 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
17622 {
17623         struct drm_plane *plane;
17624         struct drm_crtc *crtc;
17625
17626         drm_for_each_crtc(crtc, state->dev) {
17627                 struct drm_crtc_state *crtc_state;
17628
17629                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
17630                 if (IS_ERR(crtc_state))
17631                         return PTR_ERR(crtc_state);
17632         }
17633
17634         drm_for_each_plane(plane, state->dev) {
17635                 struct drm_plane_state *plane_state;
17636
17637                 plane_state = drm_atomic_get_plane_state(state, plane);
17638                 if (IS_ERR(plane_state))
17639                         return PTR_ERR(plane_state);
17640         }
17641
17642         return 0;
17643 }
17644
17645 /*
17646  * Calculate what we think the watermarks should be for the state we've read
17647  * out of the hardware and then immediately program those watermarks so that
17648  * we ensure the hardware settings match our internal state.
17649  *
17650  * We can calculate what we think WM's should be by creating a duplicate of the
17651  * current state (which was constructed during hardware readout) and running it
17652  * through the atomic check code to calculate new watermark values in the
17653  * state object.
17654  */
static void sanitize_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (WARN_ON(!state))
		return;

	intel_state = to_intel_atomic_state(state);

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = sanitize_watermarks_add_affected(state);
	if (ret)
		goto fail;

	/* Recompute watermarks for the duplicated (readout) state. */
	ret = intel_atomic_check(&dev_priv->drm, state);
	if (ret)
		goto fail;

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, crtc);

		/* Mirror the computed wm into the committed crtc state. */
		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

fail:
	/* On lock contention, drop everything and retry from scratch. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/*
	 * If we fail here, it means that the hardware appears to be
	 * programmed in a way that shouldn't be possible, given our
	 * understanding of watermark requirements.  This might mean a
	 * mistake in the hardware readout code or a mistake in the
	 * watermark calculations for a given platform.  Raise a WARN
	 * so that this is noticeable.
	 *
	 * If this actually happens, we'll have to just leave the
	 * BIOS-programmed watermarks untouched and hope for the best.
	 */
	WARN(ret, "Could not determine valid watermarks for inherited state\n");

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
17729
17730 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
17731 {
17732         if (IS_GEN(dev_priv, 5)) {
17733                 u32 fdi_pll_clk =
17734                         intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
17735
17736                 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
17737         } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
17738                 dev_priv->fdi_pll_freq = 270000;
17739         } else {
17740                 return;
17741         }
17742
17743         drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
17744 }
17745
/*
 * Commit the state inherited from the BIOS back to the hardware once,
 * so our software state and the hardware agree. Retries on -EDEADLK via
 * the standard modeset backoff dance. Returns 0 or a negative errno.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc *crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			intel_atomic_get_crtc_state(state, crtc);

		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->hw.active) {
			ret = drm_atomic_add_affected_planes(state, &crtc->base);
			if (ret)
				goto out;

			/*
			 * FIXME hack to force a LUT update to avoid the
			 * plane update forcing the pipe gamma on without
			 * having a proper LUT loaded. Remove once we
			 * have readout for pipe gamma enable.
			 */
			crtc_state->uapi.color_mgmt_changed = true;
		}
	}

	ret = drm_atomic_commit(state);

out:
	/* Lock contention: clear the state and retry with backoff. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
17802
17803 static void intel_mode_config_init(struct drm_i915_private *i915)
17804 {
17805         struct drm_mode_config *mode_config = &i915->drm.mode_config;
17806
17807         drm_mode_config_init(&i915->drm);
17808         INIT_LIST_HEAD(&i915->global_obj_list);
17809
17810         mode_config->min_width = 0;
17811         mode_config->min_height = 0;
17812
17813         mode_config->preferred_depth = 24;
17814         mode_config->prefer_shadow = 1;
17815
17816         mode_config->allow_fb_modifiers = true;
17817
17818         mode_config->funcs = &intel_mode_funcs;
17819
17820         /*
17821          * Maximum framebuffer dimensions, chosen to match
17822          * the maximum render engine surface size on gen4+.
17823          */
17824         if (INTEL_GEN(i915) >= 7) {
17825                 mode_config->max_width = 16384;
17826                 mode_config->max_height = 16384;
17827         } else if (INTEL_GEN(i915) >= 4) {
17828                 mode_config->max_width = 8192;
17829                 mode_config->max_height = 8192;
17830         } else if (IS_GEN(i915, 3)) {
17831                 mode_config->max_width = 4096;
17832                 mode_config->max_height = 4096;
17833         } else {
17834                 mode_config->max_width = 2048;
17835                 mode_config->max_height = 2048;
17836         }
17837
17838         if (IS_I845G(i915) || IS_I865G(i915)) {
17839                 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
17840                 mode_config->cursor_height = 1023;
17841         } else if (IS_GEN(i915, 2)) {
17842                 mode_config->cursor_width = 64;
17843                 mode_config->cursor_height = 64;
17844         } else {
17845                 mode_config->cursor_width = 256;
17846                 mode_config->cursor_height = 256;
17847         }
17848 }
17849
/*
 * Tear down what intel_mode_config_init() set up. The global atomic
 * objects (cdclk, bw, ...) must be released before the core mode config
 * itself is cleaned up.
 */
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
	intel_atomic_global_obj_cleanup(i915);
	drm_mode_config_cleanup(&i915->drm);
}
17855
/*
 * One-time display initialization at driver load: allocate the modeset
 * workqueues, register the global atomic state objects (cdclk, bw),
 * create the CRTCs/planes/encoders, read out the hardware state left
 * by the BIOS, and commit an initial atomic state so all derived
 * software state is consistent before the first real modeset.
 *
 * Returns 0 on success, or a negative errno from one of the early
 * init steps.
 */
int intel_modeset_init(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

	/*
	 * Ordered wq serializes nonblocking modeset commits; the high-prio
	 * unbound wq services page flip work.
	 * NOTE(review): the allocation results are not checked, and the
	 * early-return error paths below do not destroy these workqueues —
	 * confirm teardown is handled by the driver remove path.
	 */
	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

	intel_mode_config_init(i915);

	/* Register the cdclk and bandwidth global atomic state objects. */
	ret = intel_cdclk_init(i915);
	if (ret)
		return ret;

	ret = intel_bw_init(i915);
	if (ret)
		return ret;

	/* Deferred freeing of atomic states that were swapped out. */
	init_llist_head(&i915->atomic_helper.free_list);
	INIT_WORK(&i915->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(i915);

	intel_fbc_init(i915);

	intel_init_pm(i915);

	intel_panel_sanitize_ssc(i915);

	intel_gmbus_setup(i915);

	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
		    INTEL_NUM_PIPES(i915),
		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

	/* Create one CRTC (with its planes) per hardware pipe. */
	if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
		for_each_pipe(i915, pipe) {
			ret = intel_crtc_init(i915, pipe);
			if (ret) {
				intel_mode_config_cleanup(i915);
				return ret;
			}
		}
	}

	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(i915);

	intel_update_czclk(i915);
	intel_modeset_init_hw(i915);

	intel_hdcp_component_init(i915);

	if (i915->max_cdclk_freq == 0)
		intel_update_max_cdclk(i915);

	/* Just disable it once at startup */
	intel_vga_disable(i915);
	intel_setup_outputs(i915);

	/* Read out and sanitize whatever state the BIOS left behind. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		i915->display.get_initial_plane_config(crtc, &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH(i915))
		sanitize_watermarks(i915);

	/*
	 * Force all active planes to recompute their states. So that on
	 * mode_setcrtc after probe, all the intel_plane_state variables
	 * are already calculated and there is no assert_plane warnings
	 * during bootup.
	 */
	ret = intel_initial_commit(dev);
	if (ret)
		drm_dbg_kms(&i915->drm, "Initial commit in probe failed.\n");

	/* A failed initial commit is logged but deliberately not fatal. */
	return 0;
}
17966
/*
 * Force-enable a pipe on i830 with a fixed 640x480@60Hz mode. Used by
 * the i830 "pipes must stay on" quirk: the hardware misbehaves if both
 * pipes are fully disabled, so an otherwise-unused pipe is driven with
 * this dummy timing. Counterpart of i830_disable_pipe().
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity check: the chosen dividers must yield ~25.2 MHz dot clock. */
	WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

	drm_dbg_kms(&dev_priv->drm,
		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		    pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	dpll = DPLL_DVO_2X_MODE |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/* Program the fixed 640x480 timings (values are 0-based in hw). */
	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	intel_de_write(dev_priv, DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		intel_de_write(dev_priv, DPLL(pipe), dpll);
		intel_de_posting_read(dev_priv, DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	intel_de_write(dev_priv, PIPECONF(pipe),
		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* Confirm the pipe actually started scanning out. */
	intel_wait_for_pipe_scanline_moving(crtc);
}
18038
/*
 * Disable a pipe that was force-enabled by i830_enable_pipe(). All
 * planes and cursors must already be off (the WARN_ONs below assert
 * that) before the pipe and its DPLL are shut down.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
		    pipe_name(pipe));

	/* Nothing may still be scanning out of this pipe. */
	WARN_ON(intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
	WARN_ON(intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);

	intel_de_write(dev_priv, PIPECONF(pipe), 0);
	intel_de_posting_read(dev_priv, PIPECONF(pipe));

	/* The pipe must have stopped before the DPLL can be turned off. */
	intel_wait_for_pipe_scanline_stopped(crtc);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
18060
18061 static void
18062 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
18063 {
18064         struct intel_crtc *crtc;
18065
18066         if (INTEL_GEN(dev_priv) >= 4)
18067                 return;
18068
18069         for_each_intel_crtc(&dev_priv->drm, crtc) {
18070                 struct intel_plane *plane =
18071                         to_intel_plane(crtc->base.primary);
18072                 struct intel_crtc *plane_crtc;
18073                 enum pipe pipe;
18074
18075                 if (!plane->get_hw_state(plane, &pipe))
18076                         continue;
18077
18078                 if (pipe == crtc->pipe)
18079                         continue;
18080
18081                 drm_dbg_kms(&dev_priv->drm,
18082                             "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
18083                             plane->base.base.id, plane->base.name);
18084
18085                 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
18086                 intel_plane_disable_noatomic(plane_crtc, plane);
18087         }
18088 }
18089
18090 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
18091 {
18092         struct drm_device *dev = crtc->base.dev;
18093         struct intel_encoder *encoder;
18094
18095         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
18096                 return true;
18097
18098         return false;
18099 }
18100
18101 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
18102 {
18103         struct drm_device *dev = encoder->base.dev;
18104         struct intel_connector *connector;
18105
18106         for_each_connector_on_encoder(dev, &encoder->base, connector)
18107                 return connector;
18108
18109         return NULL;
18110 }
18111
18112 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
18113                               enum pipe pch_transcoder)
18114 {
18115         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
18116                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
18117 }
18118
/*
 * Clear any frame start delay the BIOS may have programmed (used for
 * debugging) back to 0, on both the CPU transcoder and, if present,
 * the PCH transcoder. The register holding the field differs per
 * platform generation, hence the branching below.
 */
static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		/* HSW+: field lives in the per-transcoder chicken register. */
		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
		u32 val;

		/* DSI transcoders have no CHICKEN_TRANS register. */
		if (transcoder_is_dsi(cpu_transcoder))
			return;

		val = intel_de_read(dev_priv, reg);
		val &= ~HSW_FRAME_START_DELAY_MASK;
		val |= HSW_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	} else {
		/* Older platforms: field lives in PIPECONF. */
		i915_reg_t reg = PIPECONF(cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
		val |= PIPECONF_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}

	if (!crtc_state->has_pch_encoder)
		return;

	if (HAS_PCH_IBX(dev_priv)) {
		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	} else {
		/* CPT+: field lives in the PCH transcoder chicken register. */
		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}
}
18169
/*
 * Bring the BIOS-inherited state of @crtc into a shape the driver can
 * work with: clear debug frame start delays, turn off all non-primary
 * planes, disable the crtc entirely if it has no encoders, and set up
 * FIFO underrun reporting bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

	if (crtc_state->hw.active) {
		struct intel_plane *plane;

		/* Clear any frame start delays used for debugging left by the BIOS */
		intel_sanitize_frame_start_delay(crtc_state);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->uapi.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}

		/*
		 * Disable any background color set by the BIOS, but enable the
		 * gamma and CSC to match how we program our planes.
		 */
		if (INTEL_GEN(dev_priv) >= 9)
			intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
				       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(crtc, ctx);

	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
18235
18236 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
18237 {
18238         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
18239
18240         /*
18241          * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
18242          * the hardware when a high res displays plugged in. DPLL P
18243          * divider is zero, and the pipe timings are bonkers. We'll
18244          * try to disable everything in that case.
18245          *
18246          * FIXME would be nice to be able to sanitize this state
18247          * without several WARNs, but for now let's take the easy
18248          * road.
18249          */
18250         return IS_GEN(dev_priv, 6) &&
18251                 crtc_state->hw.active &&
18252                 crtc_state->shared_dpll &&
18253                 crtc_state->port_clock == 0;
18254 }
18255
/*
 * Sanitize the BIOS-inherited state of @encoder: if it claims active
 * connectors but has no active pipe behind it, manually run its
 * disable hooks and clamp the connector/encoder links to off. Also
 * notifies opregion and, on gen11+, fixes up the PLL mapping.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_connector *connector;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct intel_crtc_state *crtc_state = crtc ?
		to_intel_crtc_state(crtc->base.state) : NULL;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = crtc_state &&
		crtc_state->hw.active;

	/* Treat a SNB bogus-DPLL pipe as inactive so it gets disabled. */
	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
			    pipe_name(crtc->pipe));
		has_active_crtc = false;
	}

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			    encoder->base.base.id,
			    encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (crtc_state) {
			struct drm_encoder *best_encoder;

			drm_dbg_kms(&dev_priv->drm,
				    "[ENCODER:%d:%s] manually disabled\n",
				    encoder->base.base.id,
				    encoder->base.name);

			/* avoid oopsing in case the hooks consult best_encoder */
			best_encoder = connector->base.state->best_encoder;
			connector->base.state->best_encoder = &encoder->base;

			if (encoder->disable)
				encoder->disable(encoder, crtc_state,
						 connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, crtc_state,
						      connector->base.state);

			/* Restore whatever best_encoder was there before. */
			connector->base.state->best_encoder = best_encoder;
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_sanitize_encoder_pll_mapping(encoder);
}
18325
/* FIXME read out full plane state for all planes */
/*
 * Read out plane hardware state (visibility plus which pipe each plane
 * is scanning out on) and record it in the per-crtc software state,
 * then fix up each crtc's active-planes bookkeeping.
 */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state;
		/* Default pipe for planes whose hw state reads as disabled. */
		enum pipe pipe = PIPE_A;
		bool visible;

		/* get_hw_state() updates @pipe only when the plane is enabled. */
		visible = plane->get_hw_state(plane, &pipe);

		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		crtc_state = to_intel_crtc_state(crtc->base.state);

		intel_set_plane_visible(crtc_state, plane_state, visible);

		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
			    plane->base.base.id, plane->base.name,
			    enableddisabled(visible), pipe_name(pipe));
	}

	/* Reconcile each crtc's active_planes mask with the readout above. */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		fixup_active_planes(crtc_state);
	}
}
18359
18360 static void intel_modeset_readout_hw_state(struct drm_device *dev)
18361 {
18362         struct drm_i915_private *dev_priv = to_i915(dev);
18363         struct intel_cdclk_state *cdclk_state =
18364                 to_intel_cdclk_state(dev_priv->cdclk.obj.state);
18365         enum pipe pipe;
18366         struct intel_crtc *crtc;
18367         struct intel_encoder *encoder;
18368         struct intel_connector *connector;
18369         struct drm_connector_list_iter conn_iter;
18370         int i;
18371
18372         dev_priv->active_pipes = 0;
18373
18374         for_each_intel_crtc(dev, crtc) {
18375                 struct intel_crtc_state *crtc_state =
18376                         to_intel_crtc_state(crtc->base.state);
18377
18378                 __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
18379                 intel_crtc_free_hw_state(crtc_state);
18380                 intel_crtc_state_reset(crtc_state, crtc);
18381
18382                 crtc_state->hw.active = crtc_state->hw.enable =
18383                         dev_priv->display.get_pipe_config(crtc, crtc_state);
18384
18385                 crtc->base.enabled = crtc_state->hw.enable;
18386                 crtc->active = crtc_state->hw.active;
18387
18388                 if (crtc_state->hw.active)
18389                         dev_priv->active_pipes |= BIT(crtc->pipe);
18390
18391                 drm_dbg_kms(&dev_priv->drm,
18392                             "[CRTC:%d:%s] hw state readout: %s\n",
18393                             crtc->base.base.id, crtc->base.name,
18394                             enableddisabled(crtc_state->hw.active));
18395         }
18396
18397         readout_plane_state(dev_priv);
18398
18399         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
18400                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
18401
18402                 pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
18403                                                         &pll->state.hw_state);
18404
18405                 if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
18406                     pll->info->id == DPLL_ID_EHL_DPLL4) {
18407                         pll->wakeref = intel_display_power_get(dev_priv,
18408                                                                POWER_DOMAIN_DPLL_DC_OFF);
18409                 }
18410
18411                 pll->state.crtc_mask = 0;
18412                 for_each_intel_crtc(dev, crtc) {
18413                         struct intel_crtc_state *crtc_state =
18414                                 to_intel_crtc_state(crtc->base.state);
18415
18416                         if (crtc_state->hw.active &&
18417                             crtc_state->shared_dpll == pll)
18418                                 pll->state.crtc_mask |= 1 << crtc->pipe;
18419                 }
18420                 pll->active_mask = pll->state.crtc_mask;
18421
18422                 drm_dbg_kms(&dev_priv->drm,
18423                             "%s hw state readout: crtc_mask 0x%08x, on %i\n",
18424                             pll->info->name, pll->state.crtc_mask, pll->on);
18425         }
18426
18427         for_each_intel_encoder(dev, encoder) {
18428                 pipe = 0;
18429
18430                 if (encoder->get_hw_state(encoder, &pipe)) {
18431                         struct intel_crtc_state *crtc_state;
18432
18433                         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
18434                         crtc_state = to_intel_crtc_state(crtc->base.state);
18435
18436                         encoder->base.crtc = &crtc->base;
18437                         encoder->get_config(encoder, crtc_state);
18438                 } else {
18439                         encoder->base.crtc = NULL;
18440                 }
18441
18442                 drm_dbg_kms(&dev_priv->drm,
18443                             "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
18444                             encoder->base.base.id, encoder->base.name,
18445                             enableddisabled(encoder->base.crtc),
18446                             pipe_name(pipe));
18447         }
18448
18449         drm_connector_list_iter_begin(dev, &conn_iter);
18450         for_each_intel_connector_iter(connector, &conn_iter) {
18451                 if (connector->get_hw_state(connector)) {
18452                         struct intel_crtc_state *crtc_state;
18453                         struct intel_crtc *crtc;
18454
18455                         connector->base.dpms = DRM_MODE_DPMS_ON;
18456
18457                         encoder = intel_attached_encoder(connector);
18458                         connector->base.encoder = &encoder->base;
18459
18460                         crtc = to_intel_crtc(encoder->base.crtc);
18461                         crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
18462
18463                         if (crtc_state && crtc_state->hw.active) {
18464                                 /*
18465                                  * This has to be done during hardware readout
18466                                  * because anything calling .crtc_disable may
18467                                  * rely on the connector_mask being accurate.
18468                                  */
18469                                 crtc_state->uapi.connector_mask |=
18470                                         drm_connector_mask(&connector->base);
18471                                 crtc_state->uapi.encoder_mask |=
18472                                         drm_encoder_mask(&encoder->base);
18473                         }
18474                 } else {
18475                         connector->base.dpms = DRM_MODE_DPMS_OFF;
18476                         connector->base.encoder = NULL;
18477                 }
18478                 drm_dbg_kms(&dev_priv->drm,
18479                             "[CONNECTOR:%d:%s] hw state readout: %s\n",
18480                             connector->base.base.id, connector->base.name,
18481                             enableddisabled(connector->base.encoder));
18482         }
18483         drm_connector_list_iter_end(&conn_iter);
18484
18485         for_each_intel_crtc(dev, crtc) {
18486                 struct intel_bw_state *bw_state =
18487                         to_intel_bw_state(dev_priv->bw_obj.state);
18488                 struct intel_crtc_state *crtc_state =
18489                         to_intel_crtc_state(crtc->base.state);
18490                 struct intel_plane *plane;
18491                 int min_cdclk = 0;
18492
18493                 if (crtc_state->hw.active) {
18494                         struct drm_display_mode *mode = &crtc_state->hw.mode;
18495
18496                         intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode,
18497                                                     crtc_state);
18498
18499                         *mode = crtc_state->hw.adjusted_mode;
18500                         mode->hdisplay = crtc_state->pipe_src_w;
18501                         mode->vdisplay = crtc_state->pipe_src_h;
18502
18503                         /*
18504                          * The initial mode needs to be set in order to keep
18505                          * the atomic core happy. It wants a valid mode if the
18506                          * crtc's enabled, so we do the above call.
18507                          *
18508                          * But we don't set all the derived state fully, hence
18509                          * set a flag to indicate that a full recalculation is
18510                          * needed on the next commit.
18511                          */
18512                         mode->private_flags = I915_MODE_FLAG_INHERITED;
18513
18514                         intel_crtc_compute_pixel_rate(crtc_state);
18515
18516                         intel_crtc_update_active_timings(crtc_state);
18517
18518                         intel_crtc_copy_hw_to_uapi_state(crtc_state);
18519                 }
18520
18521                 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
18522                         const struct intel_plane_state *plane_state =
18523                                 to_intel_plane_state(plane->base.state);
18524
18525                         /*
18526                          * FIXME don't have the fb yet, so can't
18527                          * use intel_plane_data_rate() :(
18528                          */
18529                         if (plane_state->uapi.visible)
18530                                 crtc_state->data_rate[plane->id] =
18531                                         4 * crtc_state->pixel_rate;
18532                         /*
18533                          * FIXME don't have the fb yet, so can't
18534                          * use plane->min_cdclk() :(
18535                          */
18536                         if (plane_state->uapi.visible && plane->min_cdclk) {
18537                                 if (crtc_state->double_wide ||
18538                                     INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
18539                                         crtc_state->min_cdclk[plane->id] =
18540                                                 DIV_ROUND_UP(crtc_state->pixel_rate, 2);
18541                                 else
18542                                         crtc_state->min_cdclk[plane->id] =
18543                                                 crtc_state->pixel_rate;
18544                         }
18545                         drm_dbg_kms(&dev_priv->drm,
18546                                     "[PLANE:%d:%s] min_cdclk %d kHz\n",
18547                                     plane->base.base.id, plane->base.name,
18548                                     crtc_state->min_cdclk[plane->id]);
18549                 }
18550
18551                 if (crtc_state->hw.active) {
18552                         min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
18553                         if (WARN_ON(min_cdclk < 0))
18554                                 min_cdclk = 0;
18555                 }
18556
18557                 cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
18558                 cdclk_state->min_voltage_level[crtc->pipe] =
18559                         crtc_state->min_voltage_level;
18560
18561                 intel_bw_crtc_update(bw_state, crtc_state);
18562
18563                 intel_pipe_config_sanity_check(dev_priv, crtc_state);
18564         }
18565 }
18566
18567 static void
18568 get_encoder_power_domains(struct drm_i915_private *dev_priv)
18569 {
18570         struct intel_encoder *encoder;
18571
18572         for_each_intel_encoder(&dev_priv->drm, encoder) {
18573                 struct intel_crtc_state *crtc_state;
18574
18575                 if (!encoder->get_power_domains)
18576                         continue;
18577
18578                 /*
18579                  * MST-primary and inactive encoders don't have a crtc state
18580                  * and neither of these require any power domain references.
18581                  */
18582                 if (!encoder->base.crtc)
18583                         continue;
18584
18585                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
18586                 encoder->get_power_domains(encoder, crtc_state);
18587         }
18588 }
18589
18590 static void intel_early_display_was(struct drm_i915_private *dev_priv)
18591 {
18592         /*
18593          * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
18594          * Also known as Wa_14010480278.
18595          */
18596         if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
18597                 intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
18598                                intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
18599
18600         if (IS_HASWELL(dev_priv)) {
18601                 /*
18602                  * WaRsPkgCStateDisplayPMReq:hsw
18603                  * System hang if this isn't done before disabling all planes!
18604                  */
18605                 intel_de_write(dev_priv, CHICKEN_PAR1_1,
18606                                intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
18607         }
18608 }
18609
18610 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
18611                                        enum port port, i915_reg_t hdmi_reg)
18612 {
18613         u32 val = intel_de_read(dev_priv, hdmi_reg);
18614
18615         if (val & SDVO_ENABLE ||
18616             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
18617                 return;
18618
18619         drm_dbg_kms(&dev_priv->drm,
18620                     "Sanitizing transcoder select for HDMI %c\n",
18621                     port_name(port));
18622
18623         val &= ~SDVO_PIPE_SEL_MASK;
18624         val |= SDVO_PIPE_SEL(PIPE_A);
18625
18626         intel_de_write(dev_priv, hdmi_reg, val);
18627 }
18628
18629 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
18630                                      enum port port, i915_reg_t dp_reg)
18631 {
18632         u32 val = intel_de_read(dev_priv, dp_reg);
18633
18634         if (val & DP_PORT_EN ||
18635             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
18636                 return;
18637
18638         drm_dbg_kms(&dev_priv->drm,
18639                     "Sanitizing transcoder select for DP %c\n",
18640                     port_name(port));
18641
18642         val &= ~DP_PIPE_SEL_MASK;
18643         val |= DP_PIPE_SEL(PIPE_A);
18644
18645         intel_de_write(dev_priv, dp_reg, val);
18646 }
18647
18648 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
18649 {
18650         /*
18651          * The BIOS may select transcoder B on some of the PCH
18652          * ports even it doesn't enable the port. This would trip
18653          * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
18654          * Sanitize the transcoder select bits to prevent that. We
18655          * assume that the BIOS never actually enabled the port,
18656          * because if it did we'd actually have to toggle the port
18657          * on and back off to make the transcoder A select stick
18658          * (see. intel_dp_link_down(), intel_disable_hdmi(),
18659          * intel_disable_sdvo()).
18660          */
18661         ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
18662         ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
18663         ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
18664
18665         /* PCH SDVOB multiplex with HDMIB */
18666         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
18667         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
18668         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
18669 }
18670
/*
 * Scan out the current hw modeset state (as programmed by the BIOS/GOP
 * or left over from a previous driver instance), and sanitize it so it
 * matches what the driver and the atomic core expect.
 *
 * Takes and releases a POWER_DOMAIN_INIT reference around the whole
 * sequence. Statement order below is significant: each sanitization
 * step depends on the earlier ones having run.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;
	int i;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	/* Early workarounds must be applied before the readout. */
	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(encoder));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	/* Sanitize planes first, then encoders, then the full crtc state. */
	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Turn off any shared DPLL that is on but not used by any crtc. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		drm_dbg_kms(&dev_priv->drm,
			    "%s enabled but not in use, disabling\n",
			    pll->info->name);

		pll->info->funcs->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read out (and where needed sanitize) the watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	/*
	 * After readout each crtc should already hold all the power
	 * domain references it needs, so acquiring them again here
	 * should return nothing new; WARN and drop any that do.
	 */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
18773
18774 void intel_display_resume(struct drm_device *dev)
18775 {
18776         struct drm_i915_private *dev_priv = to_i915(dev);
18777         struct drm_atomic_state *state = dev_priv->modeset_restore_state;
18778         struct drm_modeset_acquire_ctx ctx;
18779         int ret;
18780
18781         dev_priv->modeset_restore_state = NULL;
18782         if (state)
18783                 state->acquire_ctx = &ctx;
18784
18785         drm_modeset_acquire_init(&ctx, 0);
18786
18787         while (1) {
18788                 ret = drm_modeset_lock_all_ctx(dev, &ctx);
18789                 if (ret != -EDEADLK)
18790                         break;
18791
18792                 drm_modeset_backoff(&ctx);
18793         }
18794
18795         if (!ret)
18796                 ret = __intel_display_resume(dev, state, &ctx);
18797
18798         intel_enable_ipc(dev_priv);
18799         drm_modeset_drop_locks(&ctx);
18800         drm_modeset_acquire_fini(&ctx);
18801
18802         if (ret)
18803                 drm_err(&dev_priv->drm,
18804                         "Restoring old state failed with %i\n", ret);
18805         if (state)
18806                 drm_atomic_state_put(state);
18807 }
18808
18809 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
18810 {
18811         struct intel_connector *connector;
18812         struct drm_connector_list_iter conn_iter;
18813
18814         /* Kill all the work that may have been queued by hpd. */
18815         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
18816         for_each_intel_connector_iter(connector, &conn_iter) {
18817                 if (connector->modeset_retry_work.func)
18818                         cancel_work_sync(&connector->modeset_retry_work);
18819                 if (connector->hdcp.shim) {
18820                         cancel_delayed_work_sync(&connector->hdcp.check_work);
18821                         cancel_work_sync(&connector->hdcp.prop_work);
18822                 }
18823         }
18824         drm_connector_list_iter_end(&conn_iter);
18825 }
18826
/*
 * Tear down modeset support on driver removal. The order of the calls
 * below is significant; see the individual comments.
 */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	/* Make sure any in-flight flip/modeset work has run to completion. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	/* Drain the deferred atomic-state free list completely. */
	flush_work(&i915->atomic_helper.free_work);
	WARN_ON(!llist_empty(&i915->atomic_helper.free_list));

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning off connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(i915);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* The workqueues were flushed above, so they are empty by now. */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}
18878
18879 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
18880
/*
 * Snapshot of display controller registers taken at GPU error capture
 * time. Filled in by intel_display_capture_error_state() and dumped by
 * intel_display_print_error_state(). Entries for powered-down pipes /
 * transcoders are left zeroed (the buffer is kzalloc'd).
 */
struct intel_display_error_state {

	u32 power_well_driver;	/* HSW_PWR_WELL_CTL2, hsw/bdw only */

	struct intel_cursor_error_state {
		u32 control;	/* CURCNTR */
		u32 position;	/* CURPOS */
		u32 base;	/* CURBASE */
		/* NOTE(review): never filled in by the capture path — confirm */
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;	/* registers below valid only if true */
		u32 source;		/* PIPESRC */
		u32 stat;		/* PIPESTAT, GMCH platforms only */
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;		/* DSPCNTR */
		u32 stride;		/* DSPSTRIDE */
		u32 size;		/* DSPSIZE, gen <= 3 */
		u32 pos;		/* DSPPOS, gen <= 3 */
		u32 addr;		/* DSPADDR, gen <= 7 && !hsw */
		u32 surface;		/* DSPSURF, gen >= 4 */
		u32 tile_offset;	/* DSPTILEOFF, gen >= 4 */
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool available;		/* transcoder exists on this platform */
		bool power_domain_on;	/* registers below valid only if true */
		enum transcoder cpu_transcoder;

		u32 conf;		/* PIPECONF */

		/* Transcoder timing registers */
		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	/* sized to match transcoders[] in intel_display_capture_error_state() */
	} transcoder[5];
};
18923
18924 struct intel_display_error_state *
18925 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
18926 {
18927         struct intel_display_error_state *error;
18928         int transcoders[] = {
18929                 TRANSCODER_A,
18930                 TRANSCODER_B,
18931                 TRANSCODER_C,
18932                 TRANSCODER_D,
18933                 TRANSCODER_EDP,
18934         };
18935         int i;
18936
18937         BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
18938
18939         if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
18940                 return NULL;
18941
18942         error = kzalloc(sizeof(*error), GFP_ATOMIC);
18943         if (error == NULL)
18944                 return NULL;
18945
18946         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
18947                 error->power_well_driver = intel_de_read(dev_priv,
18948                                                          HSW_PWR_WELL_CTL2);
18949
18950         for_each_pipe(dev_priv, i) {
18951                 error->pipe[i].power_domain_on =
18952                         __intel_display_power_is_enabled(dev_priv,
18953                                                          POWER_DOMAIN_PIPE(i));
18954                 if (!error->pipe[i].power_domain_on)
18955                         continue;
18956
18957                 error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
18958                 error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
18959                 error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));
18960
18961                 error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
18962                 error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
18963                 if (INTEL_GEN(dev_priv) <= 3) {
18964                         error->plane[i].size = intel_de_read(dev_priv,
18965                                                              DSPSIZE(i));
18966                         error->plane[i].pos = intel_de_read(dev_priv,
18967                                                             DSPPOS(i));
18968                 }
18969                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
18970                         error->plane[i].addr = intel_de_read(dev_priv,
18971                                                              DSPADDR(i));
18972                 if (INTEL_GEN(dev_priv) >= 4) {
18973                         error->plane[i].surface = intel_de_read(dev_priv,
18974                                                                 DSPSURF(i));
18975                         error->plane[i].tile_offset = intel_de_read(dev_priv,
18976                                                                     DSPTILEOFF(i));
18977                 }
18978
18979                 error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));
18980
18981                 if (HAS_GMCH(dev_priv))
18982                         error->pipe[i].stat = intel_de_read(dev_priv,
18983                                                             PIPESTAT(i));
18984         }
18985
18986         for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
18987                 enum transcoder cpu_transcoder = transcoders[i];
18988
18989                 if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
18990                         continue;
18991
18992                 error->transcoder[i].available = true;
18993                 error->transcoder[i].power_domain_on =
18994                         __intel_display_power_is_enabled(dev_priv,
18995                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
18996                 if (!error->transcoder[i].power_domain_on)
18997                         continue;
18998
18999                 error->transcoder[i].cpu_transcoder = cpu_transcoder;
19000
19001                 error->transcoder[i].conf = intel_de_read(dev_priv,
19002                                                           PIPECONF(cpu_transcoder));
19003                 error->transcoder[i].htotal = intel_de_read(dev_priv,
19004                                                             HTOTAL(cpu_transcoder));
19005                 error->transcoder[i].hblank = intel_de_read(dev_priv,
19006                                                             HBLANK(cpu_transcoder));
19007                 error->transcoder[i].hsync = intel_de_read(dev_priv,
19008                                                            HSYNC(cpu_transcoder));
19009                 error->transcoder[i].vtotal = intel_de_read(dev_priv,
19010                                                             VTOTAL(cpu_transcoder));
19011                 error->transcoder[i].vblank = intel_de_read(dev_priv,
19012                                                             VBLANK(cpu_transcoder));
19013                 error->transcoder[i].vsync = intel_de_read(dev_priv,
19014                                                            VSYNC(cpu_transcoder));
19015         }
19016
19017         return error;
19018 }
19019
/* Convenience wrapper for the error-state dump below. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

/*
 * Dump a display error-state snapshot (as captured by
 * intel_display_capture_error_state()) into the error state buffer.
 * @error may be NULL, in which case nothing is printed. Registers of
 * powered-down pipes/transcoders were never read and print as zeros.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		/* Gen conditions mirror those used during capture. */
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	/* Skip transcoders that don't exist on the captured platform. */
	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		if (!error->transcoder[i].available)
			continue;

		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
19080
19081 #endif