Merge tag 'drm-intel-next-2018-09-06-2' of git://anongit.freedesktop.org/drm/drm...
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/dmi.h>
28 #include <linux/module.h>
29 #include <linux/input.h>
30 #include <linux/i2c.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/vgaarb.h>
34 #include <drm/drm_edid.h>
35 #include <drm/drmP.h>
36 #include "intel_drv.h"
37 #include "intel_frontbuffer.h"
38 #include <drm/i915_drm.h>
39 #include "i915_drv.h"
40 #include "i915_gem_clflush.h"
41 #include "intel_dsi.h"
42 #include "i915_trace.h"
43 #include <drm/drm_atomic.h>
44 #include <drm/drm_atomic_helper.h>
45 #include <drm/drm_dp_helper.h>
46 #include <drm/drm_crtc_helper.h>
47 #include <drm/drm_plane_helper.h>
48 #include <drm/drm_rect.h>
49 #include <linux/dma_remapping.h>
50 #include <linux/reservation.h>
51
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB1555,
        DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
};

/* Modifiers valid with the legacy primary plane formats above: X tiling or none */
static const uint64_t i9xx_format_modifiers[] = {
        I915_FORMAT_MOD_X_TILED,
        DRM_FORMAT_MOD_LINEAR,
        DRM_FORMAT_MOD_INVALID
};

/* Primary plane formats for SKL+ (adds alpha and packed YUV variants) */
static const uint32_t skl_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_ARGB8888,
        DRM_FORMAT_ABGR8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
        DRM_FORMAT_YUYV,
        DRM_FORMAT_YVYU,
        DRM_FORMAT_UYVY,
        DRM_FORMAT_VYUY,
};

/* skl_primary_formats plus DRM_FORMAT_NV12, for planes that support planar YUV */
static const uint32_t skl_pri_planar_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_ARGB8888,
        DRM_FORMAT_ABGR8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
        DRM_FORMAT_YUYV,
        DRM_FORMAT_YVYU,
        DRM_FORMAT_UYVY,
        DRM_FORMAT_VYUY,
        DRM_FORMAT_NV12,
};

/* SKL+ modifiers when the CCS (color compression) variants are not usable */
static const uint64_t skl_format_modifiers_noccs[] = {
        I915_FORMAT_MOD_Yf_TILED,
        I915_FORMAT_MOD_Y_TILED,
        I915_FORMAT_MOD_X_TILED,
        DRM_FORMAT_MOD_LINEAR,
        DRM_FORMAT_MOD_INVALID
};

/* SKL+ modifiers including the CCS variants, preferred order first */
static const uint64_t skl_format_modifiers_ccs[] = {
        I915_FORMAT_MOD_Yf_TILED_CCS,
        I915_FORMAT_MOD_Y_TILED_CCS,
        I915_FORMAT_MOD_Yf_TILED,
        I915_FORMAT_MOD_Y_TILED,
        I915_FORMAT_MOD_X_TILED,
        DRM_FORMAT_MOD_LINEAR,
        DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
        DRM_FORMAT_ARGB8888,
};

/* Cursor planes only support untiled buffers */
static const uint64_t cursor_format_modifiers[] = {
        DRM_FORMAT_MOD_LINEAR,
        DRM_FORMAT_MOD_INVALID
};
134
/* Forward declarations for helpers defined later in this file. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
                                  struct drm_i915_gem_object *obj,
                                  struct drm_mode_fb_cmd2 *mode_cmd);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
                                         struct intel_link_m_n *m_n,
                                         struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipemisc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
                                    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
                                         struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
166
/*
 * Valid DPLL divisor ranges for one platform / output-type combination.
 * Each member gives the inclusive [min, max] range for the matching
 * field of struct dpll; consumed by the *_find_best_dpll() helpers and
 * checked by intel_PLL_is_valid().
 */
struct intel_limit {
        struct {
                int min, max;
        } dot, vco, n, m, m1, m2, p, p1;

        /* p2 selection: p2_slow below dot_limit, p2_fast at or above it */
        struct {
                int dot_limit;
                int p2_slow, p2_fast;
        } p2;
};
177
178 /* returns HPLL frequency in kHz */
179 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
180 {
181         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
182
183         /* Obtain SKU information */
184         mutex_lock(&dev_priv->sb_lock);
185         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
186                 CCK_FUSE_HPLL_FREQ_MASK;
187         mutex_unlock(&dev_priv->sb_lock);
188
189         return vco_freq[hpll_freq] * 1000;
190 }
191
192 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
193                       const char *name, u32 reg, int ref_freq)
194 {
195         u32 val;
196         int divider;
197
198         mutex_lock(&dev_priv->sb_lock);
199         val = vlv_cck_read(dev_priv, reg);
200         mutex_unlock(&dev_priv->sb_lock);
201
202         divider = val & CCK_FREQUENCY_VALUES;
203
204         WARN((val & CCK_FREQUENCY_STATUS) !=
205              (divider << CCK_FREQUENCY_STATUS_SHIFT),
206              "%s change in progress\n", name);
207
208         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
209 }
210
211 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
212                            const char *name, u32 reg)
213 {
214         if (dev_priv->hpll_freq == 0)
215                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
216
217         return vlv_get_cck_clock(dev_priv, name, reg,
218                                  dev_priv->hpll_freq);
219 }
220
221 static void intel_update_czclk(struct drm_i915_private *dev_priv)
222 {
223         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
224                 return;
225
226         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
227                                                       CCK_CZ_CLOCK_CONTROL);
228
229         DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
230 }
231
232 static inline u32 /* units of 100MHz */
233 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
234                     const struct intel_crtc_state *pipe_config)
235 {
236         if (HAS_DDI(dev_priv))
237                 return pipe_config->port_clock; /* SPLL */
238         else
239                 return dev_priv->fdi_pll_freq;
240 }
241
/*
 * DPLL divisor limit tables, one per platform / output-type combination.
 * Consumed by the *_find_best_dpll() helpers below and validated by
 * intel_PLL_is_valid().
 */
static const struct intel_limit intel_limits_i8xx_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 1, .max = 6 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 8, .max = 18 },
        .m2 = { .min = 3, .max = 7 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 8, .max = 18 },
        .m2 = { .min = 3, .max = 7 },
        .p = { .min = 7, .max = 98 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 7 },
};


static const struct intel_limit intel_limits_g4x_sdvo = {
        .dot = { .min = 25000, .max = 270000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 10, .max = 30 },
        .p1 = { .min = 1, .max = 3},
        .p2 = { .dot_limit = 270000,
                .p2_slow = 10,
                .p2_fast = 10
        },
};

static const struct intel_limit intel_limits_g4x_hdmi = {
        .dot = { .min = 22000, .max = 400000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 16, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8},
        .p2 = { .dot_limit = 165000,
                .p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
        .dot = { .min = 20000, .max = 115000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 14, .p2_fast = 14
        },
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
        .dot = { .min = 80000, .max = 224000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 7, .p2_fast = 7
        },
};

static const struct intel_limit intel_limits_pineview_sdvo = {
        .dot = { .min = 20000, .max = 400000},
        .vco = { .min = 1700000, .max = 3500000 },
        /* Pineview's Ncounter is a ring counter */
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        /* Pineview only has one combined m divider, which we treat as m2. */
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1700000, .max = 3500000 },
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 7, .max = 112 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 5 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 118 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 56 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 2 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
         /*
          * These are the data rate limits (measured in fast clocks)
          * since those are the strictest limits we have. The fast
          * clock and actual rate limits are more relaxed, so checking
          * them would make no difference.
          */
        .dot = { .min = 25000 * 5, .max = 270000 * 5 },
        .vco = { .min = 4000000, .max = 6000000 },
        .n = { .min = 1, .max = 7 },
        .m1 = { .min = 2, .max = 3 },
        .m2 = { .min = 11, .max = 156 },
        .p1 = { .min = 2, .max = 3 },
        .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
        /*
         * These are the data rate limits (measured in fast clocks)
         * since those are the strictest limits we have.  The fast
         * clock and actual rate limits are more relaxed, so checking
         * them would make no difference.
         */
        .dot = { .min = 25000 * 5, .max = 540000 * 5},
        .vco = { .min = 4800000, .max = 6480000 },
        .n = { .min = 1, .max = 1 },
        .m1 = { .min = 2, .max = 2 },
        /* m2 is stored in .22 binary fixed point (see chv_calc_dpll_params()) */
        .m2 = { .min = 24 << 22, .max = 175 << 22 },
        .p1 = { .min = 2, .max = 4 },
        .p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
        /* FIXME: find real dot limits */
        .dot = { .min = 0, .max = INT_MAX },
        .vco = { .min = 4800000, .max = 6700000 },
        .n = { .min = 1, .max = 1 },
        .m1 = { .min = 2, .max = 2 },
        /* FIXME: find real m2 limits */
        .m2 = { .min = 2 << 22, .max = 255 << 22 },
        .p1 = { .min = 2, .max = 4 },
        .p2 = { .p2_slow = 1, .p2_fast = 20 },
};
506
507 static void
508 skl_wa_528(struct drm_i915_private *dev_priv, int pipe, bool enable)
509 {
510         if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
511                 return;
512
513         if (enable)
514                 I915_WRITE(CHICKEN_PIPESL_1(pipe), HSW_FBCQ_DIS);
515         else
516                 I915_WRITE(CHICKEN_PIPESL_1(pipe), 0);
517 }
518
519 static void
520 skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
521 {
522         if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
523                 return;
524
525         if (enable)
526                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
527                            DUPS1_GATING_DIS | DUPS2_GATING_DIS);
528         else
529                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
530                            I915_READ(CLKGATE_DIS_PSL(pipe)) &
531                            ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
532 }
533
/* Thin wrapper over drm_atomic_crtc_needs_modeset(); keeps call sites short. */
static bool
needs_modeset(const struct drm_crtc_state *state)
{
        return drm_atomic_crtc_needs_modeset(state);
}
539
540 /*
541  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
542  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
543  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
544  * The helpers' return value is the rate of the clock that is fed to the
545  * display engine's pipe which can be the above fast dot clock rate or a
546  * divided-down version of it.
547  */
548 /* m1 is reserved as 0 in Pineview, n is a ring counter */
549 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
550 {
551         clock->m = clock->m2 + 2;
552         clock->p = clock->p1 * clock->p2;
553         if (WARN_ON(clock->n == 0 || clock->p == 0))
554                 return 0;
555         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
556         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
557
558         return clock->dot;
559 }
560
561 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
562 {
563         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
564 }
565
566 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
567 {
568         clock->m = i9xx_dpll_compute_m(clock);
569         clock->p = clock->p1 * clock->p2;
570         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
571                 return 0;
572         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
573         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
574
575         return clock->dot;
576 }
577
578 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
579 {
580         clock->m = clock->m1 * clock->m2;
581         clock->p = clock->p1 * clock->p2;
582         if (WARN_ON(clock->n == 0 || clock->p == 0))
583                 return 0;
584         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
585         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
586
587         return clock->dot / 5;
588 }
589
590 int chv_calc_dpll_params(int refclk, struct dpll *clock)
591 {
592         clock->m = clock->m1 * clock->m2;
593         clock->p = clock->p1 * clock->p2;
594         if (WARN_ON(clock->n == 0 || clock->p == 0))
595                 return 0;
596         clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
597                         clock->n << 22);
598         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
599
600         return clock->dot / 5;
601 }
602
/* Bail out of intel_PLL_is_valid() with an optional (compiled-out) debug message. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
                               const struct intel_limit *limit,
                               const struct dpll *clock)
{
        if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
                INTELPllInvalid("n out of range\n");
        if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
                INTELPllInvalid("p1 out of range\n");
        if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
                INTELPllInvalid("m2 out of range\n");
        if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
                INTELPllInvalid("m1 out of range\n");

        /* m1 > m2 is required except where there is no split m divider */
        if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
            !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
                if (clock->m1 <= clock->m2)
                        INTELPllInvalid("m1 <= m2\n");

        /* VLV/CHV/BXT+ limit tables don't populate .m and .p, so skip them */
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
            !IS_GEN9_LP(dev_priv)) {
                if (clock->p < limit->p.min || limit->p.max < clock->p)
                        INTELPllInvalid("p out of range\n");
                if (clock->m < limit->m.min || limit->m.max < clock->m)
                        INTELPllInvalid("m out of range\n");
        }

        if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
                INTELPllInvalid("vco out of range\n");
        /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
         * connector, etc., rather than just a single range.
         */
        if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
                INTELPllInvalid("dot out of range\n");

        return true;
}
645
646 static int
647 i9xx_select_p2_div(const struct intel_limit *limit,
648                    const struct intel_crtc_state *crtc_state,
649                    int target)
650 {
651         struct drm_device *dev = crtc_state->base.crtc->dev;
652
653         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
654                 /*
655                  * For LVDS just rely on its current settings for dual-channel.
656                  * We haven't figured out how to reliably set up different
657                  * single/dual channel state, if we even can.
658                  */
659                 if (intel_is_dual_link_lvds(dev))
660                         return limit->p2.p2_fast;
661                 else
662                         return limit->p2.p2_slow;
663         } else {
664                 if (target < limit->p2.dot_limit)
665                         return limit->p2.p2_slow;
666                 else
667                         return limit->p2.p2_fast;
668         }
669 }
670
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
                    struct intel_crtc_state *crtc_state,
                    int target, int refclk, struct dpll *match_clock,
                    struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->base.crtc->dev;
        struct dpll clock;
        int err = target; /* smallest error found so far; start at "anything < target" */

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        /*
         * Exhaustively walk the m1/m2/n/p1 space; the candidate with the
         * smallest dot clock deviation wins, earlier candidates winning ties.
         */
        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
                     clock.m2 <= limit->m2.max; clock.m2++) {
                        /* m1 must stay strictly greater than m2 (see intel_PLL_is_valid()) */
                        if (clock.m2 >= clock.m1)
                                break;
                        for (clock.n = limit->n.min;
                             clock.n <= limit->n.max; clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                        clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;

                                        i9xx_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;
                                        /* honor the LVDS downclock P-divider constraint */
                                        if (match_clock &&
                                            clock.p != match_clock->p)
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
                                                *best_clock = clock;
                                                err = this_err;
                                        }
                                }
                        }
                }
        }

        return (err != target);
}
728
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->base.crtc->dev;
        struct dpll clock;
        int err = target; /* smallest error found so far; start at "anything < target" */

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        /*
         * Same exhaustive walk as i9xx_find_best_dpll(), minus the m1 > m2
         * check: Pineview's limit tables pin m1 to 0 (single combined m
         * divider), so no ordering constraint applies.
         */
        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
                     clock.m2 <= limit->m2.max; clock.m2++) {
                        for (clock.n = limit->n.min;
                             clock.n <= limit->n.max; clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                        clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;

                                        pnv_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;
                                        /* honor the LVDS downclock P-divider constraint */
                                        if (match_clock &&
                                            clock.p != match_clock->p)
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
                                                *best_clock = clock;
                                                err = this_err;
                                        }
                                }
                        }
                }
        }

        return (err != target);
}
784
785 /*
786  * Returns a set of divisors for the desired target clock with the given
787  * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
789  *
790  * Target and reference clocks are specified in kHz.
791  *
792  * If match_clock is provided, then best_clock P divider must match the P
793  * divider from @match_clock used for LVDS downclocking.
794  */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	/* Report all-zero divisors unless a valid candidate is found. */
	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/*
						 * Clamp the n search range so
						 * later iterations can only
						 * improve on the smallest n
						 * seen so far.
						 */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
843
844 /*
845  * Check if the calculated PLL configuration is more optimal compared to the
846  * best configuration and error found so far. Return the calculated error.
847  */
848 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
849                                const struct dpll *calculated_clock,
850                                const struct dpll *best_clock,
851                                unsigned int best_error_ppm,
852                                unsigned int *error_ppm)
853 {
854         /*
855          * For CHV ignore the error and consider only the P value.
856          * Prefer a bigger P value based on HW requirements.
857          */
858         if (IS_CHERRYVIEW(to_i915(dev))) {
859                 *error_ppm = 0;
860
861                 return calculated_clock->p > best_clock->p;
862         }
863
864         if (WARN_ON_ONCE(!target_freq))
865                 return false;
866
867         *error_ppm = div_u64(1000000ULL *
868                                 abs(target_freq - calculated_clock->dot),
869                              target_freq);
870         /*
871          * Prefer a better P value over a better (smaller) error if the error
872          * is small. Ensure this preference for future configurations too by
873          * setting the error to 0.
874          */
875         if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
876                 *error_ppm = 0;
877
878                 return true;
879         }
880
881         return *error_ppm + 10 < best_error_ppm;
882 }
883
884 /*
885  * Returns a set of divisors for the desired target clock with the given
886  * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
888  */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	/* Report all-zero divisors unless a valid candidate is found. */
	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* Solve m2 for the target fast clock given the other dividers. */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					/* Keep only candidates that beat the best so far. */
					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
943
944 /*
945  * Returns a set of divisors for the desired target clock with the given
946  * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
948  */
949 static bool
950 chv_find_best_dpll(const struct intel_limit *limit,
951                    struct intel_crtc_state *crtc_state,
952                    int target, int refclk, struct dpll *match_clock,
953                    struct dpll *best_clock)
954 {
955         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
956         struct drm_device *dev = crtc->base.dev;
957         unsigned int best_error_ppm;
958         struct dpll clock;
959         uint64_t m2;
960         int found = false;
961
962         memset(best_clock, 0, sizeof(*best_clock));
963         best_error_ppm = 1000000;
964
965         /*
966          * Based on hardware doc, the n always set to 1, and m1 always
967          * set to 2.  If requires to support 200Mhz refclk, we need to
968          * revisit this because n may not 1 anymore.
969          */
970         clock.n = 1, clock.m1 = 2;
971         target *= 5;    /* fast clock */
972
973         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
974                 for (clock.p2 = limit->p2.p2_fast;
975                                 clock.p2 >= limit->p2.p2_slow;
976                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
977                         unsigned int error_ppm;
978
979                         clock.p = clock.p1 * clock.p2;
980
981                         m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
982                                         clock.n) << 22, refclk * clock.m1);
983
984                         if (m2 > INT_MAX/clock.m1)
985                                 continue;
986
987                         clock.m2 = m2;
988
989                         chv_calc_dpll_params(refclk, &clock);
990
991                         if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
992                                 continue;
993
994                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
995                                                 best_error_ppm, &error_ppm))
996                                 continue;
997
998                         *best_clock = clock;
999                         best_error_ppm = error_ppm;
1000                         found = true;
1001                 }
1002         }
1003
1004         return found;
1005 }
1006
1007 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
1008                         struct dpll *best_clock)
1009 {
1010         int refclk = 100000;
1011         const struct intel_limit *limit = &intel_limits_bxt;
1012
1013         return chv_find_best_dpll(limit, crtc_state,
1014                                   target_clock, refclk, NULL, best_clock);
1015 }
1016
1017 bool intel_crtc_active(struct intel_crtc *crtc)
1018 {
1019         /* Be paranoid as we can arrive here with only partial
1020          * state retrieved from the hardware during setup.
1021          *
1022          * We can ditch the adjusted_mode.crtc_clock check as soon
1023          * as Haswell has gained clock readout/fastboot support.
1024          *
1025          * We can ditch the crtc->primary->state->fb check as soon as we can
1026          * properly reconstruct framebuffers.
1027          *
1028          * FIXME: The intel_crtc->active here should be switched to
1029          * crtc->state->active once we have proper CRTC states wired up
1030          * for atomic.
1031          */
1032         return crtc->active && crtc->base.primary->state->fb &&
1033                 crtc->config->base.adjusted_mode.crtc_clock;
1034 }
1035
1036 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1037                                              enum pipe pipe)
1038 {
1039         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1040
1041         return crtc->config->cpu_transcoder;
1042 }
1043
1044 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1045                                     enum pipe pipe)
1046 {
1047         i915_reg_t reg = PIPEDSL(pipe);
1048         u32 line1, line2;
1049         u32 line_mask;
1050
1051         if (IS_GEN2(dev_priv))
1052                 line_mask = DSL_LINEMASK_GEN2;
1053         else
1054                 line_mask = DSL_LINEMASK_GEN3;
1055
1056         line1 = I915_READ(reg) & line_mask;
1057         msleep(5);
1058         line2 = I915_READ(reg) & line_mask;
1059
1060         return line1 != line2;
1061 }
1062
/*
 * Wait (up to 100 ms) until the pipe's scanline counter is moving
 * (@state == true) or has stopped (@state == false); log on timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}
1073
/* Wait until the pipe's scanline counter has stopped advancing. */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
1078
/* Wait until the pipe's scanline counter starts advancing. */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
1083
/*
 * Wait for the pipe from @old_crtc_state to fully shut down: gen4+ has a
 * hardware "pipe active" status bit to poll; older gens are inferred from
 * the scanline counter no longer moving.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_wait_for_register(dev_priv,
					    reg, I965_PIPECONF_ACTIVE, 0,
					    100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
1103
1104 /* Only for pre-ILK configs */
1105 void assert_pll(struct drm_i915_private *dev_priv,
1106                 enum pipe pipe, bool state)
1107 {
1108         u32 val;
1109         bool cur_state;
1110
1111         val = I915_READ(DPLL(pipe));
1112         cur_state = !!(val & DPLL_VCO_ENABLE);
1113         I915_STATE_WARN(cur_state != state,
1114              "PLL state assertion failure (expected %s, current %s)\n",
1115                         onoff(state), onoff(cur_state));
1116 }
1117
/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	/* The DSI PLL control register is behind the CCK sideband. */
	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->sb_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1133
/* Assert that the FDI TX side of @pipe matches the expected @state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1155
/* Assert that the FDI RX side of @pipe matches the expected @state. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1170
1171 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1172                                       enum pipe pipe)
1173 {
1174         u32 val;
1175
1176         /* ILK FDI PLL is always enabled */
1177         if (IS_GEN5(dev_priv))
1178                 return;
1179
1180         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1181         if (HAS_DDI(dev_priv))
1182                 return;
1183
1184         val = I915_READ(FDI_TX_CTL(pipe));
1185         I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1186 }
1187
1188 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1189                        enum pipe pipe, bool state)
1190 {
1191         u32 val;
1192         bool cur_state;
1193
1194         val = I915_READ(FDI_RX_CTL(pipe));
1195         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1196         I915_STATE_WARN(cur_state != state,
1197              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1198                         onoff(state), onoff(cur_state));
1199 }
1200
/*
 * Assert that the panel-power-sequencer registers driving @pipe are
 * unlocked (or the panel is off), so the PLL/pipe registers may be
 * written. The PPS register and pipe mapping differ per platform.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	/* DDI platforms don't use this legacy PPS locking scheme. */
	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* Map the PPS port selection to the pipe driving that port. */
		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		/* Pre-PCH-split: a single PPS instance, expected on LVDS. */
		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* Registers are writable when the panel is off or explicitly unlocked. */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1257
/* Assert that @pipe's enable state matches the expected @state. */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	/*
	 * Only read PIPECONF if the transcoder's power well is up;
	 * a powered-down transcoder is treated as disabled.
	 */
	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
1284
/* Assert that @plane's hardware enable state matches the expected @state. */
static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	/* pipe is an out-param of get_hw_state; only the state is checked here */
	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)
1299
/* Assert that every plane attached to @crtc is disabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
1308
/*
 * Assert that vblank interrupts are off for @crtc. drm_crtc_vblank_get()
 * returning 0 means vblanks could be enabled (i.e. they were not already
 * disabled), which is the failure case; the reference is dropped again.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1314
1315 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1316                                     enum pipe pipe)
1317 {
1318         u32 val;
1319         bool enabled;
1320
1321         val = I915_READ(PCH_TRANSCONF(pipe));
1322         enabled = !!(val & TRANS_ENABLE);
1323         I915_STATE_WARN(enabled,
1324              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1325              pipe_name(pipe));
1326 }
1327
/* Assert that the PCH DP port @port is not driven by transcoder @pipe. */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	/* IBX quirk: a disabled port must not be left selecting transcoder B. */
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}
1345
/* Assert that the PCH HDMI port @port is not driven by transcoder @pipe. */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	/* IBX quirk: a disabled port must not be left selecting transcoder B. */
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}
1363
/* Assert that no PCH port (DP, VGA, LVDS, HDMI) is driven by @pipe. */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1387
/* Write the VLV DPLL register and wait for the PLL to report lock. */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	/* Let the clocks settle before polling for lock. */
	udelay(150);

	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1405
/*
 * Enable the DPLL for @crtc on VLV (if the new state wants the VCO on)
 * and program the DPLL_MD register.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}
1423
1424
/*
 * Enable the CHV DPLL: turn on the 10-bit clock via DPIO sideband,
 * then enable the PLL itself and wait for lock.
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1456
/*
 * Enable the DPLL for @crtc on CHV (if the new state wants the VCO on)
 * and program DPLL_MD, working around the missing DPLLCMD register for
 * pipes B/C.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		/* Cache the value we couldn't write directly, for readout. */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}
1493
1494 static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
1495 {
1496         struct intel_crtc *crtc;
1497         int count = 0;
1498
1499         for_each_intel_crtc(&dev_priv->drm, crtc) {
1500                 count += crtc->base.state->active &&
1501                         intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
1502         }
1503
1504         return count;
1505 }
1506
/*
 * Enable the DPLL for @crtc on gen2-4, following the required write
 * ordering (VGA mode first, pixel multiplier rewrite, warmup writes).
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneosly.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		/* gen4+ carries the pixel multiplier in DPLL_MD instead */
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}
1566
/*
 * i9xx_disable_pll - disable the DPLL for a pre-ILK (gmch) pipe
 * @crtc: crtc whose DPLL to disable
 *
 * On i830 the pipes/PLLs are kept running (quirk), so only the DVO 2x
 * clock bits are cleared there once no DVO pipes remain active.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Disable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) &&
	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev_priv)) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Leave only VGA mode disable set; everything else (incl. enable) off. */
	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
1592
/*
 * vlv_disable_pll - disable the DPLL for a Valleyview pipe
 * @dev_priv: i915 device
 * @pipe: pipe whose DPLL to disable
 *
 * Writes a "disabled" value that still keeps the reference clock bits set;
 * pipes other than A additionally keep the CRI clock running (it is needed
 * independently of the PLL being enabled).
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}
1608
/*
 * chv_disable_pll - disable the DPLL for a Cherryview pipe
 * @dev_priv: i915 device
 * @pipe: pipe whose DPLL to disable
 *
 * Like vlv_disable_pll() but additionally shuts off the 10-bit DPIO clock
 * to the display controller via the sideband interface (under sb_lock).
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	/* Sideband (DPIO) accesses must be serialized with sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}
1634
/*
 * vlv_wait_port_ready - wait for a VLV/CHV digital port to signal ready
 * @dev_priv: i915 device
 * @dport: the digital port to wait on
 * @expected_mask: ready bits expected inside the port's status field
 *
 * Polls the port-ready status bits (in DPLL(0) for ports B/C, in
 * DPIO_PHY_STATUS for port D) for up to 1 ms and WARNs on timeout.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* Port C's ready bits sit 4 bits above port B's. */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_wait_for_register(dev_priv,
				    dpll_reg, port_mask, expected_mask,
				    1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->base.port),
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}
1667
1668 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1669                                            enum pipe pipe)
1670 {
1671         struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
1672                                                                 pipe);
1673         i915_reg_t reg;
1674         uint32_t val, pipeconf_val;
1675
1676         /* Make sure PCH DPLL is enabled */
1677         assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
1678
1679         /* FDI must be feeding us bits for PCH ports */
1680         assert_fdi_tx_enabled(dev_priv, pipe);
1681         assert_fdi_rx_enabled(dev_priv, pipe);
1682
1683         if (HAS_PCH_CPT(dev_priv)) {
1684                 /* Workaround: Set the timing override bit before enabling the
1685                  * pch transcoder. */
1686                 reg = TRANS_CHICKEN2(pipe);
1687                 val = I915_READ(reg);
1688                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1689                 I915_WRITE(reg, val);
1690         }
1691
1692         reg = PCH_TRANSCONF(pipe);
1693         val = I915_READ(reg);
1694         pipeconf_val = I915_READ(PIPECONF(pipe));
1695
1696         if (HAS_PCH_IBX(dev_priv)) {
1697                 /*
1698                  * Make the BPC in transcoder be consistent with
1699                  * that in pipeconf reg. For HDMI we must use 8bpc
1700                  * here for both 8bpc and 12bpc.
1701                  */
1702                 val &= ~PIPECONF_BPC_MASK;
1703                 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
1704                         val |= PIPECONF_8BPC;
1705                 else
1706                         val |= pipeconf_val & PIPECONF_BPC_MASK;
1707         }
1708
1709         val &= ~TRANS_INTERLACE_MASK;
1710         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1711                 if (HAS_PCH_IBX(dev_priv) &&
1712                     intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
1713                         val |= TRANS_LEGACY_INTERLACED_ILK;
1714                 else
1715                         val |= TRANS_INTERLACED;
1716         else
1717                 val |= TRANS_PROGRESSIVE;
1718
1719         I915_WRITE(reg, val | TRANS_ENABLE);
1720         if (intel_wait_for_register(dev_priv,
1721                                     reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
1722                                     100))
1723                 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1724 }
1725
/*
 * lpt_enable_pch_transcoder - enable the single LPT PCH transcoder
 * @dev_priv: i915 device
 * @cpu_transcoder: the CPU transcoder feeding the PCH
 *
 * LPT has only one PCH transcoder, always fed via FDI from pipe A's RX.
 * Sets the timing-override workaround bit, copies the interlace mode
 * from the CPU transcoder's PIPECONF and enables LPT_TRANSCONF.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF,
				    TRANS_STATE_ENABLE,
				    TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
1757
/*
 * ironlake_disable_pch_transcoder - disable the PCH transcoder for a pipe
 * @dev_priv: i915 device
 * @pipe: pipe whose PCH transcoder to disable
 *
 * FDI and the PCH ports must already be off. Clears TRANS_ENABLE, waits
 * up to 50 ms for the transcoder to report disabled, and on CPT clears
 * the timing-override chicken bit set at enable time.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	uint32_t val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
1789
/*
 * lpt_disable_pch_transcoder - disable the single LPT PCH transcoder
 * @dev_priv: i915 device
 *
 * Clears TRANS_ENABLE in LPT_TRANSCONF, waits up to 50 ms for the
 * disabled state, then clears the timing-override workaround bit that
 * lpt_enable_pch_transcoder() set.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1808
1809 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1810 {
1811         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1812
1813         if (HAS_PCH_LPT(dev_priv))
1814                 return PIPE_A;
1815         else
1816                 return crtc->pipe;
1817 }
1818
/*
 * intel_enable_pipe - enable a display pipe
 * @new_crtc_state: state of the crtc being enabled
 *
 * Asserts the required clock sources (DSI PLL / pipe PLL on gmch
 * platforms, FDI PLLs for PCH encoders elsewhere) are up, then sets
 * PIPECONF_ENABLE. On i830 both pipes are kept enabled permanently,
 * so finding the bit already set is only legal there.
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	/* Planes are enabled only after the pipe itself. */
	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		WARN_ON(!IS_I830(dev_priv));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (dev_priv->drm.max_vblank_count == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1874
/*
 * intel_disable_pipe - disable a display pipe
 * @old_crtc_state: state of the crtc being disabled
 *
 * Clears PIPECONF_ENABLE (except on i830, where both pipes must stay
 * running) and waits for the pipe to actually stop. Double-wide mode
 * is always cleared since it constrains plane usage.
 */
static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	/* Already disabled - nothing to do. */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	/* Only wait for off if we actually cleared the enable bit. */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1912
/* Size in bytes of one GTT tile: 2 KiB on gen2, 4 KiB on everything newer. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN2(dev_priv))
		return 2048;

	return 4096;
}
1917
/*
 * intel_tile_width_bytes - tile row width in bytes for a framebuffer plane
 * @fb: the framebuffer
 * @plane: color plane index (plane 1 is the CCS/AUX plane for *_CCS modifiers)
 *
 * For linear buffers returns the cpp (i.e. a "tile" one pixel wide);
 * otherwise returns the tile width in bytes for the fb's modifier,
 * which on some platforms depends on generation and cpp.
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return cpp;
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN2(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		/* The CCS plane (plane 1) uses 128-byte wide tiles. */
		if (plane == 1)
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		/* The CCS plane (plane 1) uses 128-byte wide tiles. */
		if (plane == 1)
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width scales with bytes per pixel. */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}
1965
1966 static unsigned int
1967 intel_tile_height(const struct drm_framebuffer *fb, int plane)
1968 {
1969         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
1970                 return 1;
1971         else
1972                 return intel_tile_size(to_i915(fb->dev)) /
1973                         intel_tile_width_bytes(fb, plane);
1974 }
1975
1976 /* Return the tile dimensions in pixel units */
1977 static void intel_tile_dims(const struct drm_framebuffer *fb, int plane,
1978                             unsigned int *tile_width,
1979                             unsigned int *tile_height)
1980 {
1981         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, plane);
1982         unsigned int cpp = fb->format->cpp[plane];
1983
1984         *tile_width = tile_width_bytes / cpp;
1985         *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
1986 }
1987
/* Round @height up to a whole number of tile rows for the fb plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, plane));
}
1996
1997 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1998 {
1999         unsigned int size = 0;
2000         int i;
2001
2002         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2003                 size += rot_info->plane[i].width * rot_info->plane[i].height;
2004
2005         return size;
2006 }
2007
2008 static void
2009 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2010                         const struct drm_framebuffer *fb,
2011                         unsigned int rotation)
2012 {
2013         view->type = I915_GGTT_VIEW_NORMAL;
2014         if (drm_rotation_90_or_270(rotation)) {
2015                 view->type = I915_GGTT_VIEW_ROTATED;
2016                 view->rotated = to_intel_framebuffer(fb)->rot_info;
2017         }
2018 }
2019
/* Required GGTT alignment in bytes for the cursor plane surface. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	unsigned int alignment;

	if (IS_I830(dev_priv))
		alignment = 16 * 1024;
	else if (IS_I85X(dev_priv))
		alignment = 256;
	else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		alignment = 32;
	else
		alignment = 4 * 1024;

	return alignment;
}
2031
/* Required GGTT alignment in bytes for a linear (untiled) scanout surface. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	unsigned int alignment;

	if (INTEL_GEN(dev_priv) >= 9)
		alignment = 256 * 1024;
	else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		alignment = 128 * 1024;
	else if (INTEL_GEN(dev_priv) >= 4)
		alignment = 4 * 1024;
	else
		alignment = 0;

	return alignment;
}
2044
/*
 * intel_surf_alignment - required GGTT alignment for a scanout surface
 * @fb: the framebuffer
 * @plane: color plane index (plane 1 = AUX/CCS plane)
 *
 * Returns the alignment in bytes the surface base must have in the GGTT,
 * depending on tiling modifier and platform generation.
 */
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
					 int plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if (plane == 1)
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_GEN(dev_priv) >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}
2071
2072 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2073 {
2074         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2075         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2076
2077         return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
2078 }
2079
/*
 * intel_pin_and_fence_fb_obj - pin a framebuffer into the GGTT for scanout
 * @fb: framebuffer to pin
 * @rotation: plane rotation, selects normal vs rotated GGTT view
 * @uses_fence: whether a fence register should be installed if possible
 * @out_flags: PLANE_HAS_FENCE is OR'ed in when a fence was installed
 *
 * Returns the pinned vma (with an extra reference taken for the caller)
 * or an ERR_PTR. Caller must hold struct_mutex and is responsible for
 * intel_unpin_fb_vma() with the same flags.
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   unsigned int rotation,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	alignment = intel_surf_alignment(fb, 0);

	intel_fill_fb_ggtt_view(&view, fb, rotation);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	pinctl = 0;

	/* Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, &view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/* Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* Pre-gen4 cannot scan out without a fence: hard failure. */
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* Extra reference for the caller; dropped in intel_unpin_fb_vma(). */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

	intel_runtime_pm_put(dev_priv);
	return vma;
}
2173
/*
 * intel_unpin_fb_vma - undo intel_pin_and_fence_fb_obj()
 * @vma: the pinned framebuffer vma
 * @flags: the flags returned via out_flags at pin time
 *
 * Releases the fence (if one was installed), the display-plane pin, and
 * the vma reference, in that order. Caller must hold struct_mutex.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_vma_put(vma);
}
2183
2184 static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
2185                           unsigned int rotation)
2186 {
2187         if (drm_rotation_90_or_270(rotation))
2188                 return to_intel_framebuffer(fb)->rotated[plane].pitch;
2189         else
2190                 return fb->pitches[plane];
2191 }
2192
2193 /*
2194  * Convert the x/y offsets into a linear offset.
2195  * Only valid with 0/180 degree rotation, which is fine since linear
2196  * offset is only used with linear buffers on pre-hsw and tiled buffers
2197  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2198  */
2199 u32 intel_fb_xy_to_linear(int x, int y,
2200                           const struct intel_plane_state *state,
2201                           int plane)
2202 {
2203         const struct drm_framebuffer *fb = state->base.fb;
2204         unsigned int cpp = fb->format->cpp[plane];
2205         unsigned int pitch = fb->pitches[plane];
2206
2207         return y * pitch + x * cpp;
2208 }
2209
2210 /*
2211  * Add the x/y offsets derived from fb->offsets[] to the user
2212  * specified plane src x/y offsets. The resulting x/y offsets
2213  * specify the start of scanout from the beginning of the gtt mapping.
2214  */
2215 void intel_add_fb_offsets(int *x, int *y,
2216                           const struct intel_plane_state *state,
2217                           int plane)
2218
2219 {
2220         const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
2221         unsigned int rotation = state->base.rotation;
2222
2223         if (drm_rotation_90_or_270(rotation)) {
2224                 *x += intel_fb->rotated[plane].x;
2225                 *y += intel_fb->rotated[plane].y;
2226         } else {
2227                 *x += intel_fb->normal[plane].x;
2228                 *y += intel_fb->normal[plane].y;
2229         }
2230 }
2231
/*
 * __intel_adjust_tile_offset - move an offset delta into x/y coordinates
 * @x, @y: in/out pixel coordinates relative to the tile offset
 * @tile_width, @tile_height: tile dimensions in pixels
 * @tile_size: tile size in bytes
 * @pitch_tiles: row pitch measured in whole tiles
 * @old_offset: original tile-aligned byte offset
 * @new_offset: smaller tile-aligned byte offset to rebase onto
 *
 * Converts the (old_offset - new_offset) distance into whole tiles and
 * folds it into *x/*y, so the same pixel is addressed from new_offset.
 * Both offsets must be tile aligned and new_offset <= old_offset.
 * Returns @new_offset.
 */
static u32 __intel_adjust_tile_offset(int *x, int *y,
				      unsigned int tile_width,
				      unsigned int tile_height,
				      unsigned int tile_size,
				      unsigned int pitch_tiles,
				      u32 old_offset,
				      u32 new_offset)
{
	unsigned int pitch_pixels = pitch_tiles * tile_width;
	unsigned int tiles;

	WARN_ON(old_offset & (tile_size - 1));
	WARN_ON(new_offset & (tile_size - 1));
	WARN_ON(new_offset > old_offset);

	tiles = (old_offset - new_offset) / tile_size;

	/* Whole rows of tiles go into y, the remainder of tiles into x. */
	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;

	/* minimize x in case it got needlessly big */
	*y += *x / pitch_pixels * tile_height;
	*x %= pitch_pixels;

	return new_offset;
}
2258
/*
 * _intel_adjust_tile_offset - rebase a plane offset, adjusting x/y
 * @x, @y: in/out pixel coordinates
 * @fb: the framebuffer
 * @plane: color plane index
 * @rotation: plane rotation (selects rotated vs normal pitch/tiling)
 * @old_offset: current byte offset
 * @new_offset: byte offset to rebase onto (must be <= old_offset)
 *
 * Tiled path: derives tile geometry from the fb and delegates to
 * __intel_adjust_tile_offset(). Linear path: folds the raw byte
 * difference directly into y (rows) and x (pixels).
 * Returns @new_offset.
 */
static u32 _intel_adjust_tile_offset(int *x, int *y,
				     const struct drm_framebuffer *fb, int plane,
				     unsigned int rotation,
				     u32 old_offset, u32 new_offset)
{
	const struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[plane];
	unsigned int pitch = intel_fb_pitch(fb, plane, rotation);

	WARN_ON(new_offset > old_offset);

	if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* Rotated view: pitch is in rows, so measured in tile heights. */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		__intel_adjust_tile_offset(x, y, tile_width, tile_height,
					   tile_size, pitch_tiles,
					   old_offset, new_offset);
	} else {
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
2296
2297 /*
2298  * Adjust the tile offset by moving the difference into
2299  * the x/y offsets.
2300  */
2301 static u32 intel_adjust_tile_offset(int *x, int *y,
2302                                     const struct intel_plane_state *state, int plane,
2303                                     u32 old_offset, u32 new_offset)
2304 {
2305         return _intel_adjust_tile_offset(x, y, state->base.fb, plane,
2306                                          state->base.rotation,
2307                                          old_offset, new_offset);
2308 }
2309
2310 /*
2311  * Computes the linear offset to the base tile and adjusts
2312  * x, y. bytes per pixel is assumed to be a power-of-two.
2313  *
2314  * In the 90/270 rotated case, x and y are assumed
2315  * to be already rotated to match the rotated GTT view, and
2316  * pitch is the tile_height aligned framebuffer height.
2317  *
2318  * This function is used when computing the derived information
2319  * under intel_framebuffer, so using any of that information
2320  * here is not allowed. Anything under drm_framebuffer can be
2321  * used. This is why the user has to pass in the pitch since it
2322  * is specified in the rotated orientation.
2323  */
/*
 * _intel_compute_tile_offset - compute the aligned base offset for (x, y)
 * @dev_priv: i915 device
 * @x, @y: in/out pixel coordinates; reduced to the remainder within the
 *         returned aligned base
 * @fb: the framebuffer (only drm_framebuffer fields may be used here)
 * @plane: color plane index
 * @pitch: pitch in the chosen orientation (caller-supplied, see header
 *         comment above this function)
 * @rotation: plane rotation
 * @alignment: required base alignment in bytes (0 = none); assumed to be
 *             a power of two since it is used as a mask after alignment--
 *
 * Returns the largest @alignment-aligned byte offset at or below the
 * exact offset of (x, y), with *x/*y updated to address the same pixel
 * relative to that base.
 */
static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
				      int *x, int *y,
				      const struct drm_framebuffer *fb, int plane,
				      unsigned int pitch,
				      unsigned int rotation,
				      u32 alignment)
{
	uint64_t fb_modifier = fb->modifier;
	unsigned int cpp = fb->format->cpp[plane];
	u32 offset, offset_aligned;

	/* Turn the power-of-two alignment into a bit mask. */
	if (alignment)
		alignment--;

	if (fb_modifier != DRM_FORMAT_MOD_LINEAR) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* Rotated view: pitch is in rows, so measured in tile heights. */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* Reduce x/y to within one tile, counting whole tiles passed. */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
		offset_aligned = offset & ~alignment;

		/* Fold the offset-vs-aligned-base difference back into x/y. */
		__intel_adjust_tile_offset(x, y, tile_width, tile_height,
					   tile_size, pitch_tiles,
					   offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset & ~alignment;

		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
	}

	return offset_aligned;
}
2374
2375 u32 intel_compute_tile_offset(int *x, int *y,
2376                               const struct intel_plane_state *state,
2377                               int plane)
2378 {
2379         struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2380         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2381         const struct drm_framebuffer *fb = state->base.fb;
2382         unsigned int rotation = state->base.rotation;
2383         int pitch = intel_fb_pitch(fb, plane, rotation);
2384         u32 alignment;
2385
2386         if (intel_plane->id == PLANE_CURSOR)
2387                 alignment = intel_cursor_alignment(dev_priv);
2388         else
2389                 alignment = intel_surf_alignment(fb, plane);
2390
2391         return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch,
2392                                           rotation, alignment);
2393 }
2394
2395 /* Convert the fb->offset[] into x/y offsets */
2396 static int intel_fb_offset_to_xy(int *x, int *y,
2397                                  const struct drm_framebuffer *fb, int plane)
2398 {
2399         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2400
2401         if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2402             fb->offsets[plane] % intel_tile_size(dev_priv))
2403                 return -EINVAL;
2404
2405         *x = 0;
2406         *y = 0;
2407
2408         _intel_adjust_tile_offset(x, y,
2409                                   fb, plane, DRM_MODE_ROTATE_0,
2410                                   fb->offsets[plane], 0);
2411
2412         return 0;
2413 }
2414
2415 static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
2416 {
2417         switch (fb_modifier) {
2418         case I915_FORMAT_MOD_X_TILED:
2419                 return I915_TILING_X;
2420         case I915_FORMAT_MOD_Y_TILED:
2421         case I915_FORMAT_MOD_Y_TILED_CCS:
2422                 return I915_TILING_Y;
2423         default:
2424                 return I915_TILING_NONE;
2425         }
2426 }
2427
2428 /*
2429  * From the Sky Lake PRM:
2430  * "The Color Control Surface (CCS) contains the compression status of
2431  *  the cache-line pairs. The compression state of the cache-line pair
2432  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
2433  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2434  *  cache-line-pairs. CCS is always Y tiled."
2435  *
2436  * Since cache line pairs refers to horizontally adjacent cache lines,
2437  * each cache line in the CCS corresponds to an area of 32x16 cache
2438  * lines on the main surface. Since each pixel is 4 bytes, this gives
2439  * us a ratio of one byte in the CCS for each 8x16 pixels in the
2440  * main surface.
2441  */
2442 static const struct drm_format_info ccs_formats[] = {
2443         { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2444         { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2445         { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2446         { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2447 };
2448
2449 static const struct drm_format_info *
2450 lookup_format_info(const struct drm_format_info formats[],
2451                    int num_formats, u32 format)
2452 {
2453         int i;
2454
2455         for (i = 0; i < num_formats; i++) {
2456                 if (formats[i].format == format)
2457                         return &formats[i];
2458         }
2459
2460         return NULL;
2461 }
2462
2463 static const struct drm_format_info *
2464 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2465 {
2466         switch (cmd->modifier[0]) {
2467         case I915_FORMAT_MOD_Y_TILED_CCS:
2468         case I915_FORMAT_MOD_Yf_TILED_CCS:
2469                 return lookup_format_info(ccs_formats,
2470                                           ARRAY_SIZE(ccs_formats),
2471                                           cmd->pixel_format);
2472         default:
2473                 return NULL;
2474         }
2475 }
2476
2477 bool is_ccs_modifier(u64 modifier)
2478 {
2479         return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2480                modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2481 }
2482
/*
 * Validate a framebuffer's per-plane offsets and fill in the derived
 * intel_framebuffer information: normal-view x/y offsets, rotated-view
 * x/y offsets and pitches, and the rotation info used for the rotated
 * GTT view. Also checks that the backing object is large enough.
 *
 * Returns 0 on success, -EINVAL (or the intel_fb_offset_to_xy() error)
 * on a malformed framebuffer.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
                   struct drm_framebuffer *fb)
{
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct intel_rotation_info *rot_info = &intel_fb->rot_info;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        u32 gtt_offset_rotated = 0;
        unsigned int max_size = 0;
        int i, num_planes = fb->format->num_planes;
        unsigned int tile_size = intel_tile_size(dev_priv);

        for (i = 0; i < num_planes; i++) {
                unsigned int width, height;
                unsigned int cpp, size;
                u32 offset;
                int x, y;
                int ret;

                cpp = fb->format->cpp[i];
                width = drm_framebuffer_plane_width(fb->width, fb, i);
                height = drm_framebuffer_plane_height(fb->height, fb, i);

                /* Convert the byte offset of this plane into x/y. */
                ret = intel_fb_offset_to_xy(&x, &y, fb, i);
                if (ret) {
                        DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
                                      i, fb->offsets[i]);
                        return ret;
                }

                /* Plane 1 of a CCS fb is the color control surface. */
                if (is_ccs_modifier(fb->modifier) && i == 1) {
                        int hsub = fb->format->hsub;
                        int vsub = fb->format->vsub;
                        int tile_width, tile_height;
                        int main_x, main_y;
                        int ccs_x, ccs_y;

                        intel_tile_dims(fb, i, &tile_width, &tile_height);
                        tile_width *= hsub;
                        tile_height *= vsub;

                        ccs_x = (x * hsub) % tile_width;
                        ccs_y = (y * vsub) % tile_height;
                        main_x = intel_fb->normal[0].x % tile_width;
                        main_y = intel_fb->normal[0].y % tile_height;

                        /*
                         * CCS doesn't have its own x/y offset register, so the intra CCS tile
                         * x/y offsets must match between CCS and the main surface.
                         */
                        if (main_x != ccs_x || main_y != ccs_y) {
                                DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
                                              main_x, main_y,
                                              ccs_x, ccs_y,
                                              intel_fb->normal[0].x,
                                              intel_fb->normal[0].y,
                                              x, y);
                                return -EINVAL;
                        }
                }

                /*
                 * The fence (if used) is aligned to the start of the object
                 * so having the framebuffer wrap around across the edge of the
                 * fenced region doesn't really work. We have no API to configure
                 * the fence start offset within the object (nor could we probably
                 * on gen2/3). So it's just easier if we just require that the
                 * fb layout agrees with the fence layout. We already check that the
                 * fb stride matches the fence stride elsewhere.
                 */
                if (i == 0 && i915_gem_object_is_tiled(obj) &&
                    (x + width) * cpp > fb->pitches[i]) {
                        DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
                                      i, fb->offsets[i]);
                        return -EINVAL;
                }

                /*
                 * First pixel of the framebuffer from
                 * the start of the normal gtt mapping.
                 */
                intel_fb->normal[i].x = x;
                intel_fb->normal[i].y = y;

                /* Tile-aligned base of the plane, in tiles after the divide. */
                offset = _intel_compute_tile_offset(dev_priv, &x, &y,
                                                    fb, i, fb->pitches[i],
                                                    DRM_MODE_ROTATE_0, tile_size);
                offset /= tile_size;

                if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
                        unsigned int tile_width, tile_height;
                        unsigned int pitch_tiles;
                        struct drm_rect r;

                        intel_tile_dims(fb, i, &tile_width, &tile_height);

                        rot_info->plane[i].offset = offset;
                        rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
                        rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
                        rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

                        intel_fb->rotated[i].pitch =
                                rot_info->plane[i].height * tile_height;

                        /* how many tiles does this plane need */
                        size = rot_info->plane[i].stride * rot_info->plane[i].height;
                        /*
                         * If the plane isn't horizontally tile aligned,
                         * we need one more tile.
                         */
                        if (x != 0)
                                size++;

                        /* rotate the x/y offsets to match the GTT view */
                        r.x1 = x;
                        r.y1 = y;
                        r.x2 = x + width;
                        r.y2 = y + height;
                        drm_rect_rotate(&r,
                                        rot_info->plane[i].width * tile_width,
                                        rot_info->plane[i].height * tile_height,
                                        DRM_MODE_ROTATE_270);
                        x = r.x1;
                        y = r.y1;

                        /* rotate the tile dimensions to match the GTT view */
                        pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
                        swap(tile_width, tile_height);

                        /*
                         * We only keep the x/y offsets, so push all of the
                         * gtt offset into the x/y offsets.
                         */
                        __intel_adjust_tile_offset(&x, &y,
                                                   tile_width, tile_height,
                                                   tile_size, pitch_tiles,
                                                   gtt_offset_rotated * tile_size, 0);

                        gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

                        /*
                         * First pixel of the framebuffer from
                         * the start of the rotated gtt mapping.
                         */
                        intel_fb->rotated[i].x = x;
                        intel_fb->rotated[i].y = y;
                } else {
                        /* Linear: tiles needed to cover the last pixel. */
                        size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
                                            x * cpp, tile_size);
                }

                /* how many tiles in total needed in the bo */
                max_size = max(max_size, offset + size);
        }

        if (max_size * tile_size > obj->base.size) {
                DRM_DEBUG_KMS("fb too big for bo (need %u bytes, have %zu bytes)\n",
                              max_size * tile_size, obj->base.size);
                return -EINVAL;
        }

        return 0;
}
2646
2647 static int i9xx_format_to_fourcc(int format)
2648 {
2649         switch (format) {
2650         case DISPPLANE_8BPP:
2651                 return DRM_FORMAT_C8;
2652         case DISPPLANE_BGRX555:
2653                 return DRM_FORMAT_XRGB1555;
2654         case DISPPLANE_BGRX565:
2655                 return DRM_FORMAT_RGB565;
2656         default:
2657         case DISPPLANE_BGRX888:
2658                 return DRM_FORMAT_XRGB8888;
2659         case DISPPLANE_RGBX888:
2660                 return DRM_FORMAT_XBGR8888;
2661         case DISPPLANE_BGRX101010:
2662                 return DRM_FORMAT_XRGB2101010;
2663         case DISPPLANE_RGBX101010:
2664                 return DRM_FORMAT_XBGR2101010;
2665         }
2666 }
2667
2668 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2669 {
2670         switch (format) {
2671         case PLANE_CTL_FORMAT_RGB_565:
2672                 return DRM_FORMAT_RGB565;
2673         case PLANE_CTL_FORMAT_NV12:
2674                 return DRM_FORMAT_NV12;
2675         default:
2676         case PLANE_CTL_FORMAT_XRGB_8888:
2677                 if (rgb_order) {
2678                         if (alpha)
2679                                 return DRM_FORMAT_ABGR8888;
2680                         else
2681                                 return DRM_FORMAT_XBGR8888;
2682                 } else {
2683                         if (alpha)
2684                                 return DRM_FORMAT_ARGB8888;
2685                         else
2686                                 return DRM_FORMAT_XRGB8888;
2687                 }
2688         case PLANE_CTL_FORMAT_XRGB_2101010:
2689                 if (rgb_order)
2690                         return DRM_FORMAT_XBGR2101010;
2691                 else
2692                         return DRM_FORMAT_XRGB2101010;
2693         }
2694 }
2695
/*
 * Try to wrap the BIOS-programmed (pre-allocated stolen memory)
 * scanout buffer in a GEM object and initialize plane_config->fb
 * with it, so the boot framebuffer can be taken over without a
 * modeset. Returns true on success, false if the buffer can't or
 * shouldn't be reused.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj = NULL;
        struct drm_mode_fb_cmd2 mode_cmd = { 0 };
        struct drm_framebuffer *fb = &plane_config->fb->base;
        /* Page-align the range the BIOS is scanning out of. */
        u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
        u32 size_aligned = round_up(plane_config->base + plane_config->size,
                                    PAGE_SIZE);

        size_aligned -= base_aligned;

        if (plane_config->size == 0)
                return false;

        /* If the FB is too big, just don't use it since fbdev is not very
         * important and we should probably use that space with FBC or other
         * features. */
        if (size_aligned * 2 > dev_priv->stolen_usable_size)
                return false;

        /* struct_mutex protects the stolen-memory allocator here. */
        mutex_lock(&dev->struct_mutex);
        obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
                                                             base_aligned,
                                                             base_aligned,
                                                             size_aligned);
        mutex_unlock(&dev->struct_mutex);
        if (!obj)
                return false;

        /* Mirror the BIOS tiling/stride setup onto the object. */
        if (plane_config->tiling == I915_TILING_X)
                obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;

        mode_cmd.pixel_format = fb->format->format;
        mode_cmd.width = fb->width;
        mode_cmd.height = fb->height;
        mode_cmd.pitches[0] = fb->pitches[0];
        mode_cmd.modifier[0] = fb->modifier;
        mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

        if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
                DRM_DEBUG_KMS("intel fb init failed\n");
                goto out_unref_obj;
        }


        DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
        return true;

out_unref_obj:
        i915_gem_object_put(obj);
        return false;
}
2752
2753 static void
2754 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2755                         struct intel_plane_state *plane_state,
2756                         bool visible)
2757 {
2758         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2759
2760         plane_state->base.visible = visible;
2761
2762         /* FIXME pre-g4x don't work like this */
2763         if (visible) {
2764                 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
2765                 crtc_state->active_planes |= BIT(plane->id);
2766         } else {
2767                 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
2768                 crtc_state->active_planes &= ~BIT(plane->id);
2769         }
2770
2771         DRM_DEBUG_KMS("%s active planes 0x%x\n",
2772                       crtc_state->base.crtc->name,
2773                       crtc_state->active_planes);
2774 }
2775
2776 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
2777                                          struct intel_plane *plane)
2778 {
2779         struct intel_crtc_state *crtc_state =
2780                 to_intel_crtc_state(crtc->base.state);
2781         struct intel_plane_state *plane_state =
2782                 to_intel_plane_state(plane->base.state);
2783
2784         intel_set_plane_visible(crtc_state, plane_state, false);
2785
2786         if (plane->id == PLANE_PRIMARY)
2787                 intel_pre_disable_primary_noatomic(&crtc->base);
2788
2789         trace_intel_disable_plane(&plane->base, crtc);
2790         plane->disable_plane(plane, crtc);
2791 }
2792
/*
 * Take over the framebuffer the BIOS left enabled on this CRTC's
 * primary plane. Either wrap the BIOS buffer in a new fb, share an
 * fb already reconstructed for another CRTC scanning out the same
 * base address, or — if neither works — disable the primary plane so
 * the software state doesn't claim a visible plane with no fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
                             struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
        struct drm_i915_gem_object *obj;
        struct drm_plane *primary = intel_crtc->base.primary;
        struct drm_plane_state *plane_state = primary->state;
        struct drm_crtc_state *crtc_state = intel_crtc->base.state;
        struct intel_plane *intel_plane = to_intel_plane(primary);
        struct intel_plane_state *intel_state =
                to_intel_plane_state(plane_state);
        struct drm_framebuffer *fb;

        if (!plane_config->fb)
                return;

        if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
                fb = &plane_config->fb->base;
                goto valid_fb;
        }

        /* Allocation failed; the placeholder fb is ours to free. */
        kfree(plane_config->fb);

        /*
         * Failed to alloc the obj, check to see if we should share
         * an fb with another CRTC instead
         */
        for_each_crtc(dev, c) {
                struct intel_plane_state *state;

                if (c == &intel_crtc->base)
                        continue;

                if (!to_intel_crtc(c)->active)
                        continue;

                state = to_intel_plane_state(c->primary->state);
                if (!state->vma)
                        continue;

                /* Same GGTT base address => same BIOS framebuffer. */
                if (intel_plane_ggtt_offset(state) == plane_config->base) {
                        fb = state->base.fb;
                        drm_framebuffer_get(fb);
                        goto valid_fb;
                }
        }

        /*
         * We've failed to reconstruct the BIOS FB.  Current display state
         * indicates that the primary plane is visible, but has a NULL FB,
         * which will lead to problems later if we don't fix it up.  The
         * simplest solution is to just disable the primary plane now and
         * pretend the BIOS never had it enabled.
         */
        intel_plane_disable_noatomic(intel_crtc, intel_plane);

        return;

valid_fb:
        /* struct_mutex guards the pinning below. */
        mutex_lock(&dev->struct_mutex);
        intel_state->vma =
                intel_pin_and_fence_fb_obj(fb,
                                           primary->state->rotation,
                                           intel_plane_uses_fence(intel_state),
                                           &intel_state->flags);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR(intel_state->vma)) {
                DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
                          intel_crtc->pipe, PTR_ERR(intel_state->vma));

                intel_state->vma = NULL;
                drm_framebuffer_put(fb);
                return;
        }

        obj = intel_fb_obj(fb);
        intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

        /* Full-screen src/dst rectangles for the takeover state. */
        plane_state->src_x = 0;
        plane_state->src_y = 0;
        plane_state->src_w = fb->width << 16;
        plane_state->src_h = fb->height << 16;

        plane_state->crtc_x = 0;
        plane_state->crtc_y = 0;
        plane_state->crtc_w = fb->width;
        plane_state->crtc_h = fb->height;

        intel_state->base.src = drm_plane_state_src(plane_state);
        intel_state->base.dst = drm_plane_state_dest(plane_state);

        /* Keep the BIOS's swizzle setup for tiled buffers. */
        if (i915_gem_object_is_tiled(obj))
                dev_priv->preserve_bios_swizzle = true;

        plane_state->fb = fb;
        plane_state->crtc = &intel_crtc->base;

        intel_set_plane_visible(to_intel_crtc_state(crtc_state),
                                to_intel_plane_state(plane_state),
                                true);

        atomic_or(to_intel_plane(primary)->frontbuffer_bit,
                  &obj->frontbuffer_bits);
}
2900
2901 static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
2902                                unsigned int rotation)
2903 {
2904         int cpp = fb->format->cpp[plane];
2905
2906         switch (fb->modifier) {
2907         case DRM_FORMAT_MOD_LINEAR:
2908         case I915_FORMAT_MOD_X_TILED:
2909                 switch (cpp) {
2910                 case 8:
2911                         return 4096;
2912                 case 4:
2913                 case 2:
2914                 case 1:
2915                         return 8192;
2916                 default:
2917                         MISSING_CASE(cpp);
2918                         break;
2919                 }
2920                 break;
2921         case I915_FORMAT_MOD_Y_TILED_CCS:
2922         case I915_FORMAT_MOD_Yf_TILED_CCS:
2923                 /* FIXME AUX plane? */
2924         case I915_FORMAT_MOD_Y_TILED:
2925         case I915_FORMAT_MOD_Yf_TILED:
2926                 switch (cpp) {
2927                 case 8:
2928                         return 2048;
2929                 case 4:
2930                         return 4096;
2931                 case 2:
2932                 case 1:
2933                         return 8192;
2934                 default:
2935                         MISSING_CASE(cpp);
2936                         break;
2937                 }
2938                 break;
2939         default:
2940                 MISSING_CASE(fb->modifier);
2941         }
2942
2943         return 2048;
2944 }
2945
/*
 * Try to line the AUX (CCS) surface x/y offsets up with the main
 * surface x/y offsets by walking the AUX offset down one alignment
 * step at a time (which pushes the difference back into the AUX x/y).
 * On success the adjusted AUX offset/x/y are written back into
 * plane_state->aux and true is returned; returns false if the
 * coordinates can't be made to match.
 */
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
                                           int main_x, int main_y, u32 main_offset)
{
        const struct drm_framebuffer *fb = plane_state->base.fb;
        int hsub = fb->format->hsub;
        int vsub = fb->format->vsub;
        int aux_x = plane_state->aux.x;
        int aux_y = plane_state->aux.y;
        u32 aux_offset = plane_state->aux.offset;
        u32 alignment = intel_surf_alignment(fb, 1);

        /* AUX offset must stay >= main offset (hw requirement checked by caller). */
        while (aux_offset >= main_offset && aux_y <= main_y) {
                int x, y;

                if (aux_x == main_x && aux_y == main_y)
                        break;

                /* Can't walk below the start of the surface. */
                if (aux_offset == 0)
                        break;

                /* Adjust in AUX-sample units, then restore the subpixel part. */
                x = aux_x / hsub;
                y = aux_y / vsub;
                aux_offset = intel_adjust_tile_offset(&x, &y, plane_state, 1,
                                                      aux_offset, aux_offset - alignment);
                aux_x = x * hsub + aux_x % hsub;
                aux_y = y * vsub + aux_y % vsub;
        }

        if (aux_x != main_x || aux_y != main_y)
                return false;

        plane_state->aux.offset = aux_offset;
        plane_state->aux.x = aux_x;
        plane_state->aux.y = aux_y;

        return true;
}
2983
/*
 * Validate the main (Y/RGB) surface of a SKL+ plane and compute its
 * final surface offset and x/y offsets (stored in plane_state->main).
 * Enforces max plane dimensions, Display WA #1175, the X-tile
 * x+width-vs-stride restriction, and CCS main/AUX x/y matching.
 *
 * Returns 0 on success, -EINVAL/-ERANGE on an unusable configuration.
 */
static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
                                  struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->base.plane->dev);
        const struct drm_framebuffer *fb = plane_state->base.fb;
        unsigned int rotation = plane_state->base.rotation;
        int x = plane_state->base.src.x1 >> 16;
        int y = plane_state->base.src.y1 >> 16;
        int w = drm_rect_width(&plane_state->base.src) >> 16;
        int h = drm_rect_height(&plane_state->base.src) >> 16;
        int dst_x = plane_state->base.dst.x1;
        int dst_w = drm_rect_width(&plane_state->base.dst);
        int pipe_src_w = crtc_state->pipe_src_w;
        int max_width = skl_max_plane_width(fb, 0, rotation);
        int max_height = 4096;
        u32 alignment, offset, aux_offset = plane_state->aux.offset;

        if (w > max_width || h > max_height) {
                DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
                              w, h, max_width, max_height);
                return -EINVAL;
        }

        /*
         * Display WA #1175: cnl,glk
         * Planes other than the cursor may cause FIFO underflow and display
         * corruption if starting less than 4 pixels from the right edge of
         * the screen.
         * Besides the above WA fix the similar problem, where planes other
         * than the cursor ending less than 4 pixels from the left edge of the
         * screen may cause FIFO underflow and display corruption.
         */
        if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
            (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4)) {
                DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
                              dst_x + dst_w < 4 ? "end" : "start",
                              dst_x + dst_w < 4 ? dst_x + dst_w : dst_x,
                              4, pipe_src_w - 4);
                return -ERANGE;
        }

        /* Initial aligned surface offset for the main plane. */
        intel_add_fb_offsets(&x, &y, plane_state, 0);
        offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
        alignment = intel_surf_alignment(fb, 0);

        /*
         * AUX surface offset is specified as the distance from the
         * main surface offset, and it must be non-negative. Make
         * sure that is what we will get.
         */
        if (offset > aux_offset)
                offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
                                                  offset, aux_offset & ~(alignment - 1));

        /*
         * When using an X-tiled surface, the plane blows up
         * if the x offset + width exceed the stride.
         *
         * TODO: linear and Y-tiled seem fine, Yf untested,
         */
        if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
                int cpp = fb->format->cpp[0];

                /* Walk the offset down until x fits within the stride. */
                while ((x + w) * cpp > fb->pitches[0]) {
                        if (offset == 0) {
                                DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
                                return -EINVAL;
                        }

                        offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
                                                          offset, offset - alignment);
                }
        }

        /*
         * CCS AUX surface doesn't have its own x/y offsets, we must make sure
         * they match with the main surface x/y offsets.
         */
        if (is_ccs_modifier(fb->modifier)) {
                while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
                        if (offset == 0)
                                break;

                        offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
                                                          offset, offset - alignment);
                }

                if (x != plane_state->aux.x || y != plane_state->aux.y) {
                        DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
                        return -EINVAL;
                }
        }

        plane_state->main.offset = offset;
        plane_state->main.x = x;
        plane_state->main.y = y;

        return 0;
}
3084
3085 static int
3086 skl_check_nv12_surface(const struct intel_crtc_state *crtc_state,
3087                        struct intel_plane_state *plane_state)
3088 {
3089         /* Display WA #1106 */
3090         if (plane_state->base.rotation !=
3091             (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90) &&
3092             plane_state->base.rotation != DRM_MODE_ROTATE_270)
3093                 return 0;
3094
3095         /*
3096          * src coordinates are rotated here.
3097          * We check height but report it as width
3098          */
3099         if (((drm_rect_height(&plane_state->base.src) >> 16) % 4) != 0) {
3100                 DRM_DEBUG_KMS("src width must be multiple "
3101                               "of 4 for rotated NV12\n");
3102                 return -EINVAL;
3103         }
3104
3105         return 0;
3106 }
3107
3108 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
3109 {
3110         const struct drm_framebuffer *fb = plane_state->base.fb;
3111         unsigned int rotation = plane_state->base.rotation;
3112         int max_width = skl_max_plane_width(fb, 1, rotation);
3113         int max_height = 4096;
3114         int x = plane_state->base.src.x1 >> 17;
3115         int y = plane_state->base.src.y1 >> 17;
3116         int w = drm_rect_width(&plane_state->base.src) >> 17;
3117         int h = drm_rect_height(&plane_state->base.src) >> 17;
3118         u32 offset;
3119
3120         intel_add_fb_offsets(&x, &y, plane_state, 1);
3121         offset = intel_compute_tile_offset(&x, &y, plane_state, 1);
3122
3123         /* FIXME not quite sure how/if these apply to the chroma plane */
3124         if (w > max_width || h > max_height) {
3125                 DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
3126                               w, h, max_width, max_height);
3127                 return -EINVAL;
3128         }
3129
3130         plane_state->aux.offset = offset;
3131         plane_state->aux.x = x;
3132         plane_state->aux.y = y;
3133
3134         return 0;
3135 }
3136
3137 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3138 {
3139         const struct drm_framebuffer *fb = plane_state->base.fb;
3140         int src_x = plane_state->base.src.x1 >> 16;
3141         int src_y = plane_state->base.src.y1 >> 16;
3142         int hsub = fb->format->hsub;
3143         int vsub = fb->format->vsub;
3144         int x = src_x / hsub;
3145         int y = src_y / vsub;
3146         u32 offset;
3147
3148         if (plane_state->base.rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180)) {
3149                 DRM_DEBUG_KMS("RC support only with 0/180 degree rotation %x\n",
3150                               plane_state->base.rotation);
3151                 return -EINVAL;
3152         }
3153
3154         intel_add_fb_offsets(&x, &y, plane_state, 1);
3155         offset = intel_compute_tile_offset(&x, &y, plane_state, 1);
3156
3157         plane_state->aux.offset = offset;
3158         plane_state->aux.x = x * hsub + src_x % hsub;
3159         plane_state->aux.y = y * vsub + src_y % vsub;
3160
3161         return 0;
3162 }
3163
3164 int skl_check_plane_surface(const struct intel_crtc_state *crtc_state,
3165                             struct intel_plane_state *plane_state)
3166 {
3167         const struct drm_framebuffer *fb = plane_state->base.fb;
3168         unsigned int rotation = plane_state->base.rotation;
3169         int ret;
3170
3171         if (rotation & DRM_MODE_REFLECT_X &&
3172             fb->modifier == DRM_FORMAT_MOD_LINEAR) {
3173                 DRM_DEBUG_KMS("horizontal flip is not supported with linear surface formats\n");
3174                 return -EINVAL;
3175         }
3176
3177         if (!plane_state->base.visible)
3178                 return 0;
3179
3180         /* Rotate src coordinates to match rotated GTT view */
3181         if (drm_rotation_90_or_270(rotation))
3182                 drm_rect_rotate(&plane_state->base.src,
3183                                 fb->width << 16, fb->height << 16,
3184                                 DRM_MODE_ROTATE_270);
3185
3186         /*
3187          * Handle the AUX surface first since
3188          * the main surface setup depends on it.
3189          */
3190         if (fb->format->format == DRM_FORMAT_NV12) {
3191                 ret = skl_check_nv12_surface(crtc_state, plane_state);
3192                 if (ret)
3193                         return ret;
3194                 ret = skl_check_nv12_aux_surface(plane_state);
3195                 if (ret)
3196                         return ret;
3197         } else if (is_ccs_modifier(fb->modifier)) {
3198                 ret = skl_check_ccs_aux_surface(plane_state);
3199                 if (ret)
3200                         return ret;
3201         } else {
3202                 plane_state->aux.offset = ~0xfff;
3203                 plane_state->aux.x = 0;
3204                 plane_state->aux.y = 0;
3205         }
3206
3207         ret = skl_check_main_surface(crtc_state, plane_state);
3208         if (ret)
3209                 return ret;
3210
3211         return 0;
3212 }
3213
/*
 * Build the DSPCNTR register value for a pre-SKL primary plane from the
 * plane state: enable + gamma bits, gen/platform-specific feature bits,
 * pixel format, tiling and rotation/mirror.
 *
 * Returns 0 for an unsupported pixel format (also logs MISSING_CASE()).
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE;

	if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) ||
	    IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* Only pre-gen5 planes select their pipe in DSPCNTR. */
	if (INTEL_GEN(dev_priv) < 5)
		dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);

	/* Translate the DRM fourcc into hardware pixel format bits. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	/* gen2/3 only support linear surfaces on the primary plane. */
	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}
3275
/*
 * Compute the surface offset and x/y scanout position for a pre-SKL
 * primary plane and store them in plane_state->main.
 *
 * Always returns 0.
 */
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	int src_x = plane_state->base.src.x1 >> 16;
	int src_y = plane_state->base.src.y1 >> 16;
	u32 offset;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);

	/* Pre-gen4 has no surface offset register; everything is linear. */
	if (INTEL_GEN(dev_priv) >= 4)
		offset = intel_compute_tile_offset(&src_x, &src_y,
						   plane_state, 0);
	else
		offset = 0;

	/* HSW/BDW do this automagically in hardware */
	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
		unsigned int rotation = plane_state->base.rotation;
		int src_w = drm_rect_width(&plane_state->base.src) >> 16;
		int src_h = drm_rect_height(&plane_state->base.src) >> 16;

		/*
		 * With 180 degree rotation (or X mirroring) the hardware
		 * scans out backwards, so point at the last pixel instead
		 * of the first.
		 */
		if (rotation & DRM_MODE_ROTATE_180) {
			src_x += src_w - 1;
			src_y += src_h - 1;
		} else if (rotation & DRM_MODE_REFLECT_X) {
			src_x += src_w - 1;
		}
	}

	plane_state->main.offset = offset;
	plane_state->main.x = src_x;
	plane_state->main.y = src_y;

	return 0;
}
3312
/*
 * Program a pre-SKL primary plane from precomputed plane state.
 *
 * All register writes use the _FW (non-forcewake-checked) accessors and
 * are serialized under dev_priv->uncore.lock. The DSPSURF/DSPADDR write
 * is what arms the update, so it comes last for its register class.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	u32 dspcntr = plane_state->ctl;
	i915_reg_t reg = DSPCNTR(i9xx_plane);
	int x = plane_state->main.x;
	int y = plane_state->main.y;
	unsigned long irqflags;
	u32 dspaddr_offset;

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* Pre-gen4 has no tiled surface offset; use the linear offset. */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->main.offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (INTEL_GEN(dev_priv) < 4) {
		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE_FW(DSPSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
		I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		/* CHV pipe B primary plane uses the PRIM* register set. */
		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
		I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
	}

	I915_WRITE_FW(reg, dspcntr);

	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), fb->pitches[0]);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
	} else {
		I915_WRITE_FW(DSPADDR(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
	}
	POSTING_READ_FW(reg);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3376
/*
 * Disable a pre-SKL primary plane: clear DSPCNTR, then write the
 * surface/address register to arm the update. Serialized under
 * dev_priv->uncore.lock like i9xx_update_plane().
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPCNTR(i9xx_plane), 0);
	/* gen4+ arms via DSPSURF, older hardware via DSPADDR. */
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
	POSTING_READ_FW(DSPCNTR(i9xx_plane));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3395
/*
 * Read back whether a pre-SKL primary plane is currently enabled in
 * hardware, and report which pipe it is attached to via *pipe.
 *
 * Returns false if the plane's power domain is off (state unknowable)
 * or if the plane is disabled; true if it is enabled.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	val = I915_READ(DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	/* Pre-gen5 planes encode their pipe selection in DSPCNTR. */
	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
3428
3429 static u32
3430 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane)
3431 {
3432         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3433                 return 64;
3434         else
3435                 return intel_tile_width_bytes(fb, plane);
3436 }
3437
/* Unbind a single pipe scaler by zeroing its control/pos/size registers. */
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
}
3447
3448 /*
3449  * This function detaches (aka. unbinds) unused scalers in hardware
3450  */
3451 static void skl_detach_scalers(struct intel_crtc *intel_crtc)
3452 {
3453         struct intel_crtc_scaler_state *scaler_state;
3454         int i;
3455
3456         scaler_state = &intel_crtc->config->scaler_state;
3457
3458         /* loop through and disable scalers that aren't in use */
3459         for (i = 0; i < intel_crtc->num_scalers; i++) {
3460                 if (!scaler_state->scalers[i].in_use)
3461                         skl_detach_scaler(intel_crtc, i);
3462         }
3463 }
3464
3465 u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
3466                      unsigned int rotation)
3467 {
3468         u32 stride;
3469
3470         if (plane >= fb->format->num_planes)
3471                 return 0;
3472
3473         stride = intel_fb_pitch(fb, plane, rotation);
3474
3475         /*
3476          * The stride is either expressed as a multiple of 64 bytes chunks for
3477          * linear buffers or in number of tiles for tiled buffers.
3478          */
3479         if (drm_rotation_90_or_270(rotation))
3480                 stride /= intel_tile_height(fb, plane);
3481         else
3482                 stride /= intel_fb_stride_alignment(fb, plane);
3483
3484         return stride;
3485 }
3486
/*
 * Map a DRM fourcc to the SKL+ PLANE_CTL format (and RGB/YUV order)
 * bits. Returns 0 for an unsupported format (also logs MISSING_CASE()).
 */
static u32 skl_plane_ctl_format(uint32_t pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
3520
3521 /*
3522  * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
3523  * to be already pre-multiplied. We need to add a knob (or a different
3524  * DRM_FORMAT) for user-space to configure that.
3525  */
3526 static u32 skl_plane_ctl_alpha(uint32_t pixel_format)
3527 {
3528         switch (pixel_format) {
3529         case DRM_FORMAT_ABGR8888:
3530         case DRM_FORMAT_ARGB8888:
3531                 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3532         default:
3533                 return PLANE_CTL_ALPHA_DISABLE;
3534         }
3535 }
3536
3537 static u32 glk_plane_color_ctl_alpha(uint32_t pixel_format)
3538 {
3539         switch (pixel_format) {
3540         case DRM_FORMAT_ABGR8888:
3541         case DRM_FORMAT_ARGB8888:
3542                 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
3543         default:
3544                 return PLANE_COLOR_ALPHA_DISABLE;
3545         }
3546 }
3547
/*
 * Map a framebuffer modifier to the SKL+ PLANE_CTL tiling (and render
 * decompression) bits. Returns 0 for linear or an unsupported modifier
 * (the latter also logs MISSING_CASE()).
 */
static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}
3569
/*
 * Map a DRM rotation (ROTATE_MASK portion only) to the SKL+ PLANE_CTL
 * rotation bits. Returns 0 for 0 degrees or an unsupported value.
 */
static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
	switch (rotate) {
	case DRM_MODE_ROTATE_0:
		break;
	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, thats why this swapping.
	 */
	case DRM_MODE_ROTATE_90:
		return PLANE_CTL_ROTATE_270;
	case DRM_MODE_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_MODE_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotate);
	}

	return 0;
}
3591
/*
 * Map a DRM reflection (REFLECT_MASK portion only) to the CNL+
 * PLANE_CTL flip bits. Returns 0 for no reflection or an unsupported
 * value.
 */
static u32 cnl_plane_ctl_flip(unsigned int reflect)
{
	switch (reflect) {
	case 0:
		break;
	case DRM_MODE_REFLECT_X:
		return PLANE_CTL_FLIP_HORIZONTAL;
	case DRM_MODE_REFLECT_Y:
		/* fall through - Y reflection is not supported by the HW */
	default:
		MISSING_CASE(reflect);
	}

	return 0;
}
3606
/*
 * Build the full SKL+ PLANE_CTL register value for a plane from its
 * format, tiling, rotation/reflection and color key state.
 *
 * On GLK/CNL+ the alpha, gamma, CSC and YUV range bits moved to the
 * separate PLANE_COLOR_CTL register (see glk_plane_color_ctl()), hence
 * the gen check around them here.
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(fb->format->format);
		plane_ctl |=
			PLANE_CTL_PIPE_GAMMA_ENABLE |
			PLANE_CTL_PIPE_CSC_ENABLE |
			PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* Horizontal flip is only supported on CNL+. */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
3648
/*
 * Build the GLK/CNL+ PLANE_COLOR_CTL register value: gamma/CSC enables,
 * alpha mode and the YUV->RGB conversion setup for YUV formats.
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	u32 plane_color_ctl = 0;

	/* On gen11+ these enables live elsewhere. */
	if (INTEL_GEN(dev_priv) < 11) {
		plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
		plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
	}
	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);

	if (fb->format->is_yuv) {
		/* BT.709 vs BT.601 encoding; output is always RGB709. */
		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
		else
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	}

	return plane_color_ctl;
}
3676
/*
 * Re-take HW display state and then recommit the (duplicated) atomic
 * state that was saved before a GPU reset or suspend.
 *
 * @state may be NULL, in which case only the HW state readout is done.
 * Returns 0 on success or the commit's negative errno (never -EDEADLK,
 * which would indicate a locking bug - hence the WARN).
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	i915_redisable_vga(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH_DISPLAY(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	WARN_ON(ret == -EDEADLK);
	return ret;
}
3715
3716 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
3717 {
3718         return intel_has_gpu_reset(dev_priv) &&
3719                 INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv);
3720 }
3721
/*
 * Quiesce the display before a GPU reset on platforms where the reset
 * clobbers display state (or when forced for testing).
 *
 * Saves a duplicate of the current atomic state in
 * dev_priv->modeset_restore_state and disables all crtcs; the state is
 * recommitted by intel_finish_reset(). The modeset locks acquired here
 * stay held across the reset (dropped in intel_finish_reset()).
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
	wake_up_all(&dev_priv->gpu_error.wait_queue);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		i915_gem_set_wedged(dev_priv);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* Retry the lock acquisition until it no longer deadlocks. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		return;
	}

	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
3777
/*
 * Counterpart of intel_prepare_reset(): restore the display state saved
 * before the GPU reset and release the modeset locks taken there.
 *
 * If the reset actually clobbered the display, the display block is
 * fully re-initialized (interrupts, clock gating, HPD) before the saved
 * state is recommitted.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_runtime_pm_disable_interrupts(dev_priv);
		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev);
		intel_init_clock_gating(dev_priv);

		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
}
3831
/*
 * Fastset path: update pipe source size and panel fitter state without
 * a full modeset.
 */
static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
				     const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = new_crtc_state->base.mode;

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	I915_WRITE(PIPESRC(crtc->pipe),
		   ((new_crtc_state->pipe_src_w - 1) << 16) |
		   (new_crtc_state->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(crtc);

		if (new_crtc_state->pch_pfit.enabled)
			skylake_pfit_enable(crtc);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ironlake_pfit_enable(crtc);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(crtc, true);
	}
}
3867
/*
 * Switch the FDI link from a training pattern to the normal pixel
 * pattern on both ends (CPU-side TX, PCH-side RX), with enhanced
 * framing enabled.  IVB (TX) and CPT (RX) use a different register
 * layout for the train-pattern field, hence the per-platform branches.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
3908
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Two-stage training: drive pattern 1 until FDI_RX_IIR reports bit
 * lock, then pattern 2 until it reports symbol lock, polling up to five
 * times per stage.  Unlike the SNB/IVB variants below, ILK does not
 * step through vswing/pre-emphasis levels.
 */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock; the IIR bit is sticky, write-1-to-clear. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock, again write-1-to-clear on success. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
4002
/*
 * Voltage-swing / pre-emphasis levels stepped through, in order, by the
 * SNB and IVB FDI link training routines below.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
4009
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Two-stage training: pattern 1 until the PCH RX reports bit lock, then
 * pattern 2 until it reports symbol lock.  Each stage steps through the
 * four vswing/pre-emphasis levels in snb_b_fdi_train_param, polling
 * FDI_RX_IIR up to five times per level before trying the next one.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Stage 1: try each vswing level, polling for bit lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* Sticky IIR bit: write 1 to clear. */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Stage 2: same vswing walk, now polling for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
4142
/* Manual link training for Ivy Bridge A0 parts */
/*
 * IVB A0 can't use automatic FDI training, so drive the sequence by
 * hand: for each vswing/pre-emphasis level (each tried twice), disable
 * TX/RX, re-enable with training pattern 1 and poll for bit lock, then
 * switch to pattern 2 and poll for symbol lock.  On failure of either
 * stage, move on to the next level.
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j counts each setting twice, hence j/2 indexes the table. */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			/* Double-read in case lock arrives between polls. */
			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
4262
/*
 * Enable the FDI PLLs for @intel_crtc: first the PCH-side RX PLL (with
 * the bpc field copied from the pipe's PIPECONF), then switch the RX to
 * PCDclk, and finally ensure the CPU-side TX PLL is on.  Warm-up delays
 * between steps are required by the hardware.
 */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	/* Keep FDI RX bpc (bits 18:16) consistent with PIPECONF. */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
4299
/*
 * Undo ironlake_fdi_pll_enable(): switch the PCH RX back from PCDclk to
 * the raw clock, then disable the CPU TX PLL and the PCH RX PLL,
 * waiting for the clocks to settle after each step.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
4329
/*
 * Disable the FDI link for @crtc: turn off the CPU-side TX and the
 * PCH-side RX, apply the IBX clock-pointer workaround, and leave both
 * ends parked in training pattern 1 ready for the next enable.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
4382
4383 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
4384 {
4385         struct drm_crtc *crtc;
4386         bool cleanup_done;
4387
4388         drm_for_each_crtc(crtc, &dev_priv->drm) {
4389                 struct drm_crtc_commit *commit;
4390                 spin_lock(&crtc->commit_lock);
4391                 commit = list_first_entry_or_null(&crtc->commit_list,
4392                                                   struct drm_crtc_commit, commit_entry);
4393                 cleanup_done = commit ?
4394                         try_wait_for_completion(&commit->cleanup_done) : true;
4395                 spin_unlock(&crtc->commit_lock);
4396
4397                 if (cleanup_done)
4398                         continue;
4399
4400                 drm_crtc_wait_one_vblank(crtc);
4401
4402                 return true;
4403         }
4404
4405         return false;
4406 }
4407
/*
 * Gate the pixel clock and disable the iCLKIP modulator via the
 * sideband interface.  Counterpart of lpt_program_iclkip().
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* sb_lock serializes all sideband (SBI) accesses. */
	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
4422
/* Program iCLKIP clock to the desired frequency */
/*
 * Pick an auxiliary divider, 7-bit integer divider (divsel) and phase
 * increment so the 172.8 MHz virtual root clock divides down to the
 * pipe's pixel clock, program them over the sideband interface, enable
 * the modulator and ungate the pixel clock.
 */
static void lpt_program_iclkip(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	/* sb_lock serializes all sideband (SBI) accesses. */
	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
4500
/*
 * Read back the current iCLKIP configuration over the sideband
 * interface and return the resulting clock in kHz, or 0 when iCLKIP is
 * gated or disabled.  Inverse of lpt_program_iclkip().
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	/* Pixel clock gated means iCLKIP isn't driving anything. */
	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* Reverse of the divisor split done in lpt_program_iclkip(). */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
4537
/*
 * Copy the CPU transcoder timing registers for @crtc into the PCH
 * transcoder @pch_transcoder, so both sides of the FDI link run with
 * identical mode timings.
 */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
4561
/*
 * Set or clear FDI B/C lane bifurcation in SOUTH_CHICKEN1.  No-op when
 * already in the requested state; both FDI B and FDI C receivers must
 * be disabled while the setting is changed (WARNed on below).
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
4582
4583 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4584 {
4585         struct drm_device *dev = intel_crtc->base.dev;
4586
4587         switch (intel_crtc->pipe) {
4588         case PIPE_A:
4589                 break;
4590         case PIPE_B:
4591                 if (intel_crtc->config->fdi_lanes > 2)
4592                         cpt_set_fdi_bc_bifurcation(dev, false);
4593                 else
4594                         cpt_set_fdi_bc_bifurcation(dev, true);
4595
4596                 break;
4597         case PIPE_C:
4598                 cpt_set_fdi_bc_bifurcation(dev, true);
4599
4600                 break;
4601         default:
4602                 BUG();
4603         }
4604 }
4605
4606 /*
4607  * Finds the encoder associated with the given CRTC. This can only be
4608  * used when we know that the CRTC isn't feeding multiple encoders!
4609  */
4610 static struct intel_encoder *
4611 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
4612                            const struct intel_crtc_state *crtc_state)
4613 {
4614         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4615         const struct drm_connector_state *connector_state;
4616         const struct drm_connector *connector;
4617         struct intel_encoder *encoder = NULL;
4618         int num_encoders = 0;
4619         int i;
4620
4621         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4622                 if (connector_state->crtc != &crtc->base)
4623                         continue;
4624
4625                 encoder = to_intel_encoder(connector_state->best_encoder);
4626                 num_encoders++;
4627         }
4628
4629         WARN(num_encoders != 1, "%d encoders for pipe %c\n",
4630              num_encoders, pipe_name(crtc->pipe));
4631
4632         return encoder;
4633 }
4634
4635 /*
4636  * Enable PCH resources required for PCH ports:
4637  *   - PCH PLLs
4638  *   - FDI training & RX/TX
4639  *   - update transcoder timings
4640  *   - DP transcoding bits
4641  *   - transcoder
4642  */
4643 static void ironlake_pch_enable(const struct intel_atomic_state *state,
4644                                 const struct intel_crtc_state *crtc_state)
4645 {
4646         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4647         struct drm_device *dev = crtc->base.dev;
4648         struct drm_i915_private *dev_priv = to_i915(dev);
4649         int pipe = crtc->pipe;
4650         u32 temp;
4651
4652         assert_pch_transcoder_disabled(dev_priv, pipe);
4653
4654         if (IS_IVYBRIDGE(dev_priv))
4655                 ivybridge_update_fdi_bc_bifurcation(crtc);
4656
4657         /* Write the TU size bits before fdi link training, so that error
4658          * detection works. */
4659         I915_WRITE(FDI_RX_TUSIZE1(pipe),
4660                    I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
4661
4662         /* For PCH output, training FDI link */
4663         dev_priv->display.fdi_link_train(crtc, crtc_state);
4664
4665         /* We need to program the right clock selection before writing the pixel
4666          * mutliplier into the DPLL. */
4667         if (HAS_PCH_CPT(dev_priv)) {
4668                 u32 sel;
4669
4670                 temp = I915_READ(PCH_DPLL_SEL);
4671                 temp |= TRANS_DPLL_ENABLE(pipe);
4672                 sel = TRANS_DPLLB_SEL(pipe);
4673                 if (crtc_state->shared_dpll ==
4674                     intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
4675                         temp |= sel;
4676                 else
4677                         temp &= ~sel;
4678                 I915_WRITE(PCH_DPLL_SEL, temp);
4679         }
4680
4681         /* XXX: pch pll's can be enabled any time before we enable the PCH
4682          * transcoder, and we actually should do this to not upset any PCH
4683          * transcoder that already use the clock when we share it.
4684          *
4685          * Note that enable_shared_dpll tries to do the right thing, but
4686          * get_shared_dpll unconditionally resets the pll - we need that to have
4687          * the right LVDS enable sequence. */
4688         intel_enable_shared_dpll(crtc);
4689
4690         /* set transcoder timing, panel must allow it */
4691         assert_panel_unlocked(dev_priv, pipe);
4692         ironlake_pch_transcoder_set_timings(crtc, pipe);
4693
4694         intel_fdi_normal_train(crtc);
4695
4696         /* For PCH DP, enable TRANS_DP_CTL */
4697         if (HAS_PCH_CPT(dev_priv) &&
4698             intel_crtc_has_dp_encoder(crtc_state)) {
4699                 const struct drm_display_mode *adjusted_mode =
4700                         &crtc_state->base.adjusted_mode;
4701                 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
4702                 i915_reg_t reg = TRANS_DP_CTL(pipe);
4703                 enum port port;
4704
4705                 temp = I915_READ(reg);
4706                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
4707                           TRANS_DP_SYNC_MASK |
4708                           TRANS_DP_BPC_MASK);
4709                 temp |= TRANS_DP_OUTPUT_ENABLE;
4710                 temp |= bpc << 9; /* same format but at 11:9 */
4711
4712                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
4713                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
4714                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
4715                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
4716
4717                 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
4718                 WARN_ON(port < PORT_B || port > PORT_D);
4719                 temp |= TRANS_DP_PORT_SEL(port);
4720
4721                 I915_WRITE(reg, temp);
4722         }
4723
4724         ironlake_enable_pch_transcoder(dev_priv, pipe);
4725 }
4726
/*
 * LPT variant of the PCH enable sequence: program the iCLKIP clock,
 * copy the transcoder timings, and enable the PCH transcoder.  Note
 * that the PCH transcoder is addressed as PIPE_A here regardless of
 * the crtc's pipe.
 */
static void lpt_pch_enable(const struct intel_atomic_state *state,
                           const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        assert_pch_transcoder_disabled(dev_priv, PIPE_A);

        lpt_program_iclkip(crtc);

        /* Set transcoder timing. */
        ironlake_pch_transcoder_set_timings(crtc, PIPE_A);

        lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
4743
4744 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4745 {
4746         struct drm_i915_private *dev_priv = to_i915(dev);
4747         i915_reg_t dslreg = PIPEDSL(pipe);
4748         u32 temp;
4749
4750         temp = I915_READ(dslreg);
4751         udelay(500);
4752         if (wait_for(I915_READ(dslreg) != temp, 5)) {
4753                 if (wait_for(I915_READ(dslreg) != temp, 5))
4754                         DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
4755         }
4756 }
4757
4758 /*
4759  * The hardware phase 0.0 refers to the center of the pixel.
4760  * We want to start from the top/left edge which is phase
4761  * -0.5. That matches how the hardware calculates the scaling
4762  * factors (from top-left of the first pixel to bottom-right
4763  * of the last pixel, as opposed to the pixel centers).
4764  *
4765  * For 4:2:0 subsampled chroma planes we obviously have to
4766  * adjust that so that the chroma sample position lands in
4767  * the right spot.
4768  *
4769  * Note that for packed YCbCr 4:2:2 formats there is no way to
4770  * control chroma siting. The hardware simply replicates the
4771  * chroma samples for both of the luma samples, and thus we don't
4772  * actually get the expected MPEG2 chroma siting convention :(
4773  * The same behaviour is observed on pre-SKL platforms as well.
4774  */
4775 u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
4776 {
4777         int phase = -0x8000;
4778         u16 trip = 0;
4779
4780         if (chroma_cosited)
4781                 phase += (sub - 1) * 0x8000 / sub;
4782
4783         if (phase < 0)
4784                 phase = 0x10000 + phase;
4785         else
4786                 trip = PS_PHASE_TRIP;
4787
4788         return ((phase >> 2) & PS_PHASE_MASK) | trip;
4789 }
4790
/*
 * Stage a scaler allocation (or release) request in @crtc_state for one
 * scaler user (the crtc itself or one of its planes).
 *
 * @crtc_state: state whose scaler_state is updated
 * @force_detach: release any scaler currently bound to this user
 * @scaler_user: index identifying the user (SKL_CRTC_INDEX or a plane index)
 * @scaler_id: in/out currently assigned scaler id; set to -1 on release
 * @src_w, @src_h: source dimensions
 * @dst_w, @dst_h: destination dimensions
 * @plane_scaler_check: true when called on behalf of a plane (enables the
 *                      NV12-specific checks)
 * @pixel_format: fb format, only meaningful when @plane_scaler_check is true
 *
 * Returns 0 on success, -EINVAL when the requested scaling is out of range
 * or not supported.  Only crtc_state bookkeeping is touched here; the
 * actual register programming happens later in plane/panel-fitter code.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
                  unsigned int scaler_user, int *scaler_id,
                  int src_w, int src_h, int dst_w, int dst_h,
                  bool plane_scaler_check,
                  uint32_t pixel_format)
{
        struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->base.adjusted_mode;
        int need_scaling;

        /*
         * Src coordinates are already rotated by 270 degrees for
         * the 90/270 degree plane rotation cases (to match the
         * GTT mapping), hence no need to account for rotation here.
         */
        need_scaling = src_w != dst_w || src_h != dst_h;

        /* NV12 planes always need a scaler, even at 1:1. */
        if (plane_scaler_check)
                if (pixel_format == DRM_FORMAT_NV12)
                        need_scaling = true;

        /* YCbCr 4:2:0 output likewise forces a pipe scaler. */
        if (crtc_state->ycbcr420 && scaler_user == SKL_CRTC_INDEX)
                need_scaling = true;

        /*
         * Scaling/fitting not supported in IF-ID mode in GEN9+
         * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
         * Once NV12 is enabled, handle it here while allocating scaler
         * for NV12.
         */
        if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
            need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
                return -EINVAL;
        }

        /*
         * if plane is being disabled or scaler is no more required or force detach
         *  - free scaler binded to this plane/crtc
         *  - in order to do this, update crtc->scaler_usage
         *
         * Here scaler state in crtc_state is set free so that
         * scaler can be assigned to other user. Actual register
         * update to free the scaler is done in plane/panel-fit programming.
         * For this purpose crtc/plane_state->scaler_id isn't reset here.
         */
        if (force_detach || !need_scaling) {
                if (*scaler_id >= 0) {
                        scaler_state->scaler_users &= ~(1 << scaler_user);
                        scaler_state->scalers[*scaler_id].in_use = 0;

                        DRM_DEBUG_KMS("scaler_user index %u.%u: "
                                "Staged freeing scaler id %d scaler_users = 0x%x\n",
                                intel_crtc->pipe, scaler_user, *scaler_id,
                                scaler_state->scaler_users);
                        *scaler_id = -1;
                }
                return 0;
        }

        /* NV12 has its own, stricter, minimum source size. */
        if (plane_scaler_check && pixel_format == DRM_FORMAT_NV12 &&
            (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
                DRM_DEBUG_KMS("NV12: src dimensions not met\n");
                return -EINVAL;
        }

        /* range checks */
        if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
            dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
            (IS_GEN11(dev_priv) &&
             (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
              dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
            (!IS_GEN11(dev_priv) &&
             (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
              dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
                DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
                        "size is out of scaler range\n",
                        intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
                return -EINVAL;
        }

        /* mark this plane as a scaler user in crtc_state */
        scaler_state->scaler_users |= (1 << scaler_user);
        DRM_DEBUG_KMS("scaler_user index %u.%u: "
                "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
                intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
                scaler_state->scaler_users);

        return 0;
}
4887
4888 /**
4889  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4890  *
4891  * @state: crtc's scaler state
4892  *
4893  * Return
4894  *     0 - scaler_usage updated successfully
4895  *    error - requested scaling cannot be supported or other error condition
4896  */
4897 int skl_update_scaler_crtc(struct intel_crtc_state *state)
4898 {
4899         const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4900
4901         return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4902                                  &state->scaler_state.scaler_id,
4903                                  state->pipe_src_w, state->pipe_src_h,
4904                                  adjusted_mode->crtc_hdisplay,
4905                                  adjusted_mode->crtc_vdisplay, false, 0);
4906 }
4907
4908 /**
4909  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
4910  * @crtc_state: crtc's scaler state
4911  * @plane_state: atomic plane state to update
4912  *
4913  * Return
4914  *     0 - scaler_usage updated successfully
4915  *    error - requested scaling cannot be supported or other error condition
4916  */
4917 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4918                                    struct intel_plane_state *plane_state)
4919 {
4920
4921         struct intel_plane *intel_plane =
4922                 to_intel_plane(plane_state->base.plane);
4923         struct drm_framebuffer *fb = plane_state->base.fb;
4924         int ret;
4925
4926         bool force_detach = !fb || !plane_state->base.visible;
4927
4928         ret = skl_update_scaler(crtc_state, force_detach,
4929                                 drm_plane_index(&intel_plane->base),
4930                                 &plane_state->scaler_id,
4931                                 drm_rect_width(&plane_state->base.src) >> 16,
4932                                 drm_rect_height(&plane_state->base.src) >> 16,
4933                                 drm_rect_width(&plane_state->base.dst),
4934                                 drm_rect_height(&plane_state->base.dst),
4935                                 fb ? true : false, fb ? fb->format->format : 0);
4936
4937         if (ret || plane_state->scaler_id < 0)
4938                 return ret;
4939
4940         /* check colorkey */
4941         if (plane_state->ckey.flags) {
4942                 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
4943                               intel_plane->base.base.id,
4944                               intel_plane->base.name);
4945                 return -EINVAL;
4946         }
4947
4948         /* Check src format */
4949         switch (fb->format->format) {
4950         case DRM_FORMAT_RGB565:
4951         case DRM_FORMAT_XBGR8888:
4952         case DRM_FORMAT_XRGB8888:
4953         case DRM_FORMAT_ABGR8888:
4954         case DRM_FORMAT_ARGB8888:
4955         case DRM_FORMAT_XRGB2101010:
4956         case DRM_FORMAT_XBGR2101010:
4957         case DRM_FORMAT_YUYV:
4958         case DRM_FORMAT_YVYU:
4959         case DRM_FORMAT_UYVY:
4960         case DRM_FORMAT_VYUY:
4961         case DRM_FORMAT_NV12:
4962                 break;
4963         default:
4964                 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
4965                               intel_plane->base.base.id, intel_plane->base.name,
4966                               fb->base.id, fb->format->format);
4967                 return -EINVAL;
4968         }
4969
4970         return 0;
4971 }
4972
4973 static void skylake_scaler_disable(struct intel_crtc *crtc)
4974 {
4975         int i;
4976
4977         for (i = 0; i < crtc->num_scalers; i++)
4978                 skl_detach_scaler(crtc, i);
4979 }
4980
/* Program the SKL+ panel fitting (pipe scaler), if the state enables it. */
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = crtc->pipe;
        struct intel_crtc_scaler_state *scaler_state =
                &crtc->config->scaler_state;

        if (crtc->config->pch_pfit.enabled) {
                u16 uv_rgb_hphase, uv_rgb_vphase;
                int id;

                /* A scaler must have been assigned during atomic check. */
                if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
                        return;

                /* sub = 1, not cosited: pipe scaling is not chroma-subsampled. */
                uv_rgb_hphase = skl_scaler_calc_phase(1, false);
                uv_rgb_vphase = skl_scaler_calc_phase(1, false);

                id = scaler_state->scaler_id;
                /* NOTE(review): mixes I915_WRITE and I915_WRITE_FW on the
                 * scaler registers - confirm that is intentional. */
                I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
                        PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
                I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
                              PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
                I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
                              PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
                I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
                I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
        }
}
5010
/* Enable the ILK-style panel fitter for the crtc, if the state wants it. */
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = crtc->pipe;

        if (crtc->config->pch_pfit.enabled) {
                /* Force use of hard-coded filter coefficients
                 * as some pre-programmed values are broken,
                 * e.g. x201.
                 */
                if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
                        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
                                                 PF_PIPE_SEL_IVB(pipe));
                else
                        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
                I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
                I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
        }
}
5031
/*
 * Enable IPS if the crtc state asks for it.  On Broadwell the request
 * goes through the pcode mailbox (and the resulting IPS_CTL readback
 * cannot be trusted); on other platforms (presumably Haswell, given the
 * hsw_ prefix) IPS_CTL is written directly and we wait for the enable
 * bit to stick.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        /*
         * We can only enable IPS after we enable a plane and wait for a vblank
         * This function is called from post_plane_update, which is run after
         * a vblank wait.
         */
        WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

        if (IS_BROADWELL(dev_priv)) {
                mutex_lock(&dev_priv->pcu_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
                                                IPS_ENABLE | IPS_PCODE_CONTROL));
                mutex_unlock(&dev_priv->pcu_lock);
                /* Quoting Art Runyan: "its not safe to expect any particular
                 * value in IPS_CTL bit 31 after enabling IPS through the
                 * mailbox." Moreover, the mailbox may return a bogus state,
                 * so we need to just enable it and continue on.
                 */
        } else {
                I915_WRITE(IPS_CTL, IPS_ENABLE);
                /* The bit only becomes 1 in the next vblank, so this wait here
                 * is essentially intel_wait_for_vblank. If we don't have this
                 * and don't wait for vblanks until the end of crtc_enable, then
                 * the HW state readout code will complain that the expected
                 * IPS_CTL value is not the one we read. */
                if (intel_wait_for_register(dev_priv,
                                            IPS_CTL, IPS_ENABLE, IPS_ENABLE,
                                            50))
                        DRM_ERROR("Timed out waiting for IPS enable\n");
        }
}
5071
/*
 * Disable IPS, mirroring hsw_enable_ips: pcode mailbox on Broadwell,
 * direct IPS_CTL write elsewhere.  Ends with a vblank wait so callers
 * may safely disable planes afterwards.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        if (IS_BROADWELL(dev_priv)) {
                mutex_lock(&dev_priv->pcu_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
                mutex_unlock(&dev_priv->pcu_lock);
                /*
                 * Wait for PCODE to finish disabling IPS. The BSpec specified
                 * 42ms timeout value leads to occasional timeouts so use 100ms
                 * instead.
                 */
                if (intel_wait_for_register(dev_priv,
                                            IPS_CTL, IPS_ENABLE, 0,
                                            100))
                        DRM_ERROR("Timed out waiting for IPS disable\n");
        } else {
                I915_WRITE(IPS_CTL, 0);
                POSTING_READ(IPS_CTL);
        }

        /* We need to wait for a vblank before we can disable the plane. */
        intel_wait_for_vblank(dev_priv, crtc->pipe);
}
5102
/* Switch off the legacy overlay (if any) attached to this crtc. */
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
        if (intel_crtc->overlay) {
                struct drm_device *dev = intel_crtc->base.dev;

                /* struct_mutex guards the overlay switch-off; the return
                 * value is deliberately ignored (best effort). */
                mutex_lock(&dev->struct_mutex);
                (void) intel_overlay_switch_off(intel_crtc->overlay);
                mutex_unlock(&dev->struct_mutex);
        }

        /* Let userspace switch the overlay on again. In most cases userspace
         * has to recompute where to put it anyway.
         */
}
5117
5118 /**
5119  * intel_post_enable_primary - Perform operations after enabling primary plane
5120  * @crtc: the CRTC whose primary plane was just enabled
5121  * @new_crtc_state: the enabling state
5122  *
5123  * Performs potentially sleeping operations that must be done after the primary
5124  * plane is enabled, such as updating FBC and IPS.  Note that this may be
5125  * called due to an explicit primary plane update, or due to an implicit
5126  * re-enable that is caused when a sprite plane is updated to no longer
5127  * completely hide the primary plane.
5128  */
5129 static void
5130 intel_post_enable_primary(struct drm_crtc *crtc,
5131                           const struct intel_crtc_state *new_crtc_state)
5132 {
5133         struct drm_device *dev = crtc->dev;
5134         struct drm_i915_private *dev_priv = to_i915(dev);
5135         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5136         int pipe = intel_crtc->pipe;
5137
5138         /*
5139          * Gen2 reports pipe underruns whenever all planes are disabled.
5140          * So don't enable underrun reporting before at least some planes
5141          * are enabled.
5142          * FIXME: Need to fix the logic to work when we turn off all planes
5143          * but leave the pipe running.
5144          */
5145         if (IS_GEN2(dev_priv))
5146                 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5147
5148         /* Underruns don't always raise interrupts, so check manually. */
5149         intel_check_cpu_fifo_underruns(dev_priv);
5150         intel_check_pch_fifo_underruns(dev_priv);
5151 }
5152
/* FIXME get rid of this and use pre_plane_update */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;

        /*
         * Gen2 reports pipe underruns whenever all planes are disabled.
         * So disable underrun reporting before all the planes get disabled.
         */
        if (IS_GEN2(dev_priv))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

        /* IPS must be off before the primary plane goes away. */
        hsw_disable_ips(to_intel_crtc_state(crtc->state));

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH_DISPLAY(dev_priv) &&
            intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, pipe);
}
5184
5185 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5186                                        const struct intel_crtc_state *new_crtc_state)
5187 {
5188         if (!old_crtc_state->ips_enabled)
5189                 return false;
5190
5191         if (needs_modeset(&new_crtc_state->base))
5192                 return true;
5193
5194         return !new_crtc_state->ips_enabled;
5195 }
5196
5197 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5198                                        const struct intel_crtc_state *new_crtc_state)
5199 {
5200         if (!new_crtc_state->ips_enabled)
5201                 return false;
5202
5203         if (needs_modeset(&new_crtc_state->base))
5204                 return true;
5205
5206         /*
5207          * We can't read out IPS on broadwell, assume the worst and
5208          * forcibly enable IPS on the first fastset.
5209          */
5210         if (new_crtc_state->update_pipe &&
5211             old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5212                 return true;
5213
5214         return !old_crtc_state->ips_enabled;
5215 }
5216
5217 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5218                           const struct intel_crtc_state *crtc_state)
5219 {
5220         if (!crtc_state->nv12_planes)
5221                 return false;
5222
5223         if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
5224                 return false;
5225
5226         if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) ||
5227             IS_CANNONLAKE(dev_priv))
5228                 return true;
5229
5230         return false;
5231 }
5232
/*
 * Post-commit plane bookkeeping: frontbuffer flip notification, watermark
 * update, IPS re-enable, FBC update, primary-plane post-enable work and
 * NV12 workaround teardown.
 */
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *old_state = old_crtc_state->base.state;
        struct intel_crtc_state *pipe_config =
                intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
                                                crtc);
        struct drm_plane *primary = crtc->base.primary;
        struct drm_plane_state *old_primary_state =
                drm_atomic_get_old_plane_state(old_state, primary);

        intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

        if (pipe_config->update_wm_post && pipe_config->base.active)
                intel_update_watermarks(crtc);

        if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
                hsw_enable_ips(pipe_config);

        if (old_primary_state) {
                struct drm_plane_state *new_primary_state =
                        drm_atomic_get_new_plane_state(old_state, primary);

                intel_fbc_post_update(crtc);

                /* Run the post-enable work when the primary plane just
                 * became visible (or after a modeset). */
                if (new_primary_state->visible &&
                    (needs_modeset(&pipe_config->base) ||
                     !old_primary_state->visible))
                        intel_post_enable_primary(&crtc->base, pipe_config);
        }

        /* Display WA 827 */
        if (needs_nv12_wa(dev_priv, old_crtc_state) &&
            !needs_nv12_wa(dev_priv, pipe_config)) {
                skl_wa_clkgate(dev_priv, crtc->pipe, false);
                skl_wa_528(dev_priv, crtc->pipe, false);
        }
}
5273
/*
 * Pre-commit plane bookkeeping: IPS disable, FBC pre-update, underrun
 * reporting, NV12 workaround setup, self-refresh/LP-watermark quirks
 * and intermediate watermark programming.
 */
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
                                   struct intel_crtc_state *pipe_config)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *old_state = old_crtc_state->base.state;
        struct drm_plane *primary = crtc->base.primary;
        struct drm_plane_state *old_primary_state =
                drm_atomic_get_old_plane_state(old_state, primary);
        bool modeset = needs_modeset(&pipe_config->base);
        struct intel_atomic_state *old_intel_state =
                to_intel_atomic_state(old_state);

        if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
                hsw_disable_ips(old_crtc_state);

        if (old_primary_state) {
                struct intel_plane_state *new_primary_state =
                        intel_atomic_get_new_plane_state(old_intel_state,
                                                         to_intel_plane(primary));

                intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
                /*
                 * Gen2 reports pipe underruns whenever all planes are disabled.
                 * So disable underrun reporting before all the planes get disabled.
                 */
                if (IS_GEN2(dev_priv) && old_primary_state->visible &&
                    (modeset || !new_primary_state->base.visible))
                        intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
        }

        /* Display WA 827 */
        if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
            needs_nv12_wa(dev_priv, pipe_config)) {
                skl_wa_clkgate(dev_priv, crtc->pipe, true);
                skl_wa_528(dev_priv, crtc->pipe, true);
        }

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH_DISPLAY(dev_priv) && old_crtc_state->base.active &&
            pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, crtc->pipe);

        /*
         * IVB workaround: must disable low power watermarks for at least
         * one frame before enabling scaling.  LP watermarks can be re-enabled
         * when scaling is disabled.
         *
         * WaCxSRDisabledForSpriteScaling:ivb
         */
        if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev))
                intel_wait_for_vblank(dev_priv, crtc->pipe);

        /*
         * If we're doing a modeset, we're done.  No need to do any pre-vblank
         * watermark programming here.
         */
        if (needs_modeset(&pipe_config->base))
                return;

        /*
         * For platforms that support atomic watermarks, program the
         * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
         * will be the intermediate values that are safe for both pre- and
         * post- vblank; when vblank happens, the 'active' values will be set
         * to the final 'target' values and we'll do this again to get the
         * optimal watermarks.  For gen9+ platforms, the values we program here
         * will be the final target values which will get automatically latched
         * at vblank time; no further programming will be necessary.
         *
         * If a platform hasn't been transitioned to atomic watermarks yet,
         * we'll continue to update watermarks the old way, if flags tell
         * us to.
         */
        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(old_intel_state,
                                                     pipe_config);
        else if (pipe_config->update_wm_pre)
                intel_update_watermarks(crtc);
}
5363
/*
 * Disable all planes in @plane_mask on the crtc (plus the legacy
 * overlay) and signal a frontbuffer flip for the whole pipe.
 */
static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
{
        struct drm_device *dev = crtc->dev;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_plane *p;
        int pipe = intel_crtc->pipe;

        intel_crtc_dpms_overlay_disable(intel_crtc);

        drm_for_each_plane_mask(p, dev, plane_mask)
                to_intel_plane(p)->disable_plane(to_intel_plane(p), intel_crtc);

        /*
         * FIXME: Once we grow proper nuclear flip support out of this we need
         * to compute the mask of flip planes precisely. For the time being
         * consider this a flip to a NULL plane.
         */
        intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
}
5383
5384 static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
5385                                           struct intel_crtc_state *crtc_state,
5386                                           struct drm_atomic_state *old_state)
5387 {
5388         struct drm_connector_state *conn_state;
5389         struct drm_connector *conn;
5390         int i;
5391
5392         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5393                 struct intel_encoder *encoder =
5394                         to_intel_encoder(conn_state->best_encoder);
5395
5396                 if (conn_state->crtc != crtc)
5397                         continue;
5398
5399                 if (encoder->pre_pll_enable)
5400                         encoder->pre_pll_enable(encoder, crtc_state, conn_state);
5401         }
5402 }
5403
5404 static void intel_encoders_pre_enable(struct drm_crtc *crtc,
5405                                       struct intel_crtc_state *crtc_state,
5406                                       struct drm_atomic_state *old_state)
5407 {
5408         struct drm_connector_state *conn_state;
5409         struct drm_connector *conn;
5410         int i;
5411
5412         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5413                 struct intel_encoder *encoder =
5414                         to_intel_encoder(conn_state->best_encoder);
5415
5416                 if (conn_state->crtc != crtc)
5417                         continue;
5418
5419                 if (encoder->pre_enable)
5420                         encoder->pre_enable(encoder, crtc_state, conn_state);
5421         }
5422 }
5423
5424 static void intel_encoders_enable(struct drm_crtc *crtc,
5425                                   struct intel_crtc_state *crtc_state,
5426                                   struct drm_atomic_state *old_state)
5427 {
5428         struct drm_connector_state *conn_state;
5429         struct drm_connector *conn;
5430         int i;
5431
5432         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5433                 struct intel_encoder *encoder =
5434                         to_intel_encoder(conn_state->best_encoder);
5435
5436                 if (conn_state->crtc != crtc)
5437                         continue;
5438
5439                 encoder->enable(encoder, crtc_state, conn_state);
5440                 intel_opregion_notify_encoder(encoder, true);
5441         }
5442 }
5443
5444 static void intel_encoders_disable(struct drm_crtc *crtc,
5445                                    struct intel_crtc_state *old_crtc_state,
5446                                    struct drm_atomic_state *old_state)
5447 {
5448         struct drm_connector_state *old_conn_state;
5449         struct drm_connector *conn;
5450         int i;
5451
5452         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5453                 struct intel_encoder *encoder =
5454                         to_intel_encoder(old_conn_state->best_encoder);
5455
5456                 if (old_conn_state->crtc != crtc)
5457                         continue;
5458
5459                 intel_opregion_notify_encoder(encoder, false);
5460                 encoder->disable(encoder, old_crtc_state, old_conn_state);
5461         }
5462 }
5463
5464 static void intel_encoders_post_disable(struct drm_crtc *crtc,
5465                                         struct intel_crtc_state *old_crtc_state,
5466                                         struct drm_atomic_state *old_state)
5467 {
5468         struct drm_connector_state *old_conn_state;
5469         struct drm_connector *conn;
5470         int i;
5471
5472         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5473                 struct intel_encoder *encoder =
5474                         to_intel_encoder(old_conn_state->best_encoder);
5475
5476                 if (old_conn_state->crtc != crtc)
5477                         continue;
5478
5479                 if (encoder->post_disable)
5480                         encoder->post_disable(encoder, old_crtc_state, old_conn_state);
5481         }
5482 }
5483
5484 static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
5485                                             struct intel_crtc_state *old_crtc_state,
5486                                             struct drm_atomic_state *old_state)
5487 {
5488         struct drm_connector_state *old_conn_state;
5489         struct drm_connector *conn;
5490         int i;
5491
5492         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5493                 struct intel_encoder *encoder =
5494                         to_intel_encoder(old_conn_state->best_encoder);
5495
5496                 if (old_conn_state->crtc != crtc)
5497                         continue;
5498
5499                 if (encoder->post_pll_disable)
5500                         encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
5501         }
5502 }
5503
/*
 * Modeset enable sequence for an ILK/SNB/IVB (PCH-split) pipe.
 *
 * The statement order below follows the hardware-mandated sequence
 * (timings/M-N before pipeconf, FDI PLL before pipe enable, LUT load
 * before the pipe runs, PCH enable after pipe enable, vblank waits at
 * the end) and must not be reordered casually.
 */
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
                                 struct drm_atomic_state *old_state)
{
        struct drm_crtc *crtc = pipe_config->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        struct intel_atomic_state *old_intel_state =
                to_intel_atomic_state(old_state);

        /* Enabling an already-active crtc is a driver bug; bail out. */
        if (WARN_ON(intel_crtc->active))
                return;

        /*
         * Sometimes spurious CPU pipe underruns happen during FDI
         * training, at least with VGA+HDMI cloning. Suppress them.
         *
         * On ILK we get an occasional spurious CPU pipe underruns
         * between eDP port A enable and vdd enable. Also PCH port
         * enable seems to result in the occasional CPU pipe underrun.
         *
         * Spurious PCH underruns also occur during PCH enabling.
         */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        if (intel_crtc->config->has_pch_encoder)
                intel_prepare_shared_dpll(intel_crtc);

        if (intel_crtc_has_dp_encoder(intel_crtc->config))
                intel_dp_set_m_n(intel_crtc, M1_N1);

        /* Program pipe timings, source size and (for PCH) FDI M/N values. */
        intel_set_pipe_timings(intel_crtc);
        intel_set_pipe_src_size(intel_crtc);

        if (intel_crtc->config->has_pch_encoder) {
                intel_cpu_transcoder_set_m_n(intel_crtc,
                                     &intel_crtc->config->fdi_m_n, NULL);
        }

        ironlake_set_pipeconf(crtc);

        intel_crtc->active = true;

        intel_encoders_pre_enable(crtc, pipe_config, old_state);

        if (intel_crtc->config->has_pch_encoder) {
                /* Note: FDI PLL enabling _must_ be done before we enable the
                 * cpu pipes, hence this is separate from all the other fdi/pch
                 * enabling. */
                ironlake_fdi_pll_enable(intel_crtc);
        } else {
                assert_fdi_tx_disabled(dev_priv, pipe);
                assert_fdi_rx_disabled(dev_priv, pipe);
        }

        ironlake_pfit_enable(intel_crtc);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(&pipe_config->base);

        /* Program watermarks (when the platform supports it), then start the pipe. */
        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
        intel_enable_pipe(pipe_config);

        if (intel_crtc->config->has_pch_encoder)
                ironlake_pch_enable(old_intel_state, pipe_config);

        assert_vblank_disabled(crtc);
        drm_crtc_vblank_on(crtc);

        intel_encoders_enable(crtc, pipe_config, old_state);

        if (HAS_PCH_CPT(dev_priv))
                cpt_verify_modeset(dev, intel_crtc->pipe);

        /*
         * Must wait for vblank to avoid spurious PCH FIFO underruns.
         * And a second vblank wait is needed at least on ILK with
         * some interlaced HDMI modes. Let's do the double wait always
         * in case there are more corner cases we don't know about.
         */
        if (intel_crtc->config->has_pch_encoder) {
                intel_wait_for_vblank(dev_priv, pipe);
                intel_wait_for_vblank(dev_priv, pipe);
        }
        /* Re-arm the underrun reporting suppressed at the top of this function. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5597
5598 /* IPS only exists on ULT machines and is tied to pipe A. */
5599 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
5600 {
5601         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
5602 }
5603
5604 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
5605                                             enum pipe pipe, bool apply)
5606 {
5607         u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
5608         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
5609
5610         if (apply)
5611                 val |= mask;
5612         else
5613                 val &= ~mask;
5614
5615         I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
5616 }
5617
5618 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
5619 {
5620         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5621         enum pipe pipe = crtc->pipe;
5622         uint32_t val;
5623
5624         val = MBUS_DBOX_BW_CREDIT(1) | MBUS_DBOX_A_CREDIT(2);
5625
5626         /* Program B credit equally to all pipes */
5627         val |= MBUS_DBOX_B_CREDIT(24 / INTEL_INFO(dev_priv)->num_pipes);
5628
5629         I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
5630 }
5631
/*
 * Modeset enable sequence for a HSW+ (DDI) pipe, including SKL/GLK/CNL/ICL.
 *
 * The statement order follows the hardware-mandated sequence: PLLs and
 * pre-PLL encoder hooks first, then transcoder timings and pipe config,
 * scaler/pfit, LUT, DDI transcoder function, watermarks, pipe enable,
 * and finally the encoder ->enable() hooks plus workaround vblank waits.
 * Do not reorder casually.
 */
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
                                struct drm_atomic_state *old_state)
{
        struct drm_crtc *crtc = pipe_config->base.crtc;
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe, hsw_workaround_pipe;
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
        struct intel_atomic_state *old_intel_state =
                to_intel_atomic_state(old_state);
        bool psl_clkgate_wa;
        u32 pipe_chicken;

        /* Enabling an already-active crtc is a driver bug; bail out. */
        if (WARN_ON(intel_crtc->active))
                return;

        intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

        if (intel_crtc->config->shared_dpll)
                intel_enable_shared_dpll(intel_crtc);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_map_plls_to_ports(crtc, pipe_config, old_state);

        intel_encoders_pre_enable(crtc, pipe_config, old_state);

        if (intel_crtc_has_dp_encoder(intel_crtc->config))
                intel_dp_set_m_n(intel_crtc, M1_N1);

        /* DSI transcoders program their own timings via the encoder hooks. */
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_set_pipe_timings(intel_crtc);

        intel_set_pipe_src_size(intel_crtc);

        if (cpu_transcoder != TRANSCODER_EDP &&
            !transcoder_is_dsi(cpu_transcoder)) {
                I915_WRITE(PIPE_MULT(cpu_transcoder),
                           intel_crtc->config->pixel_multiplier - 1);
        }

        if (intel_crtc->config->has_pch_encoder) {
                intel_cpu_transcoder_set_m_n(intel_crtc,
                                     &intel_crtc->config->fdi_m_n, NULL);
        }

        if (!transcoder_is_dsi(cpu_transcoder))
                haswell_set_pipeconf(crtc);

        haswell_set_pipemisc(crtc);

        intel_color_set_csc(&pipe_config->base);

        intel_crtc->active = true;

        /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
        psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
                         intel_crtc->config->pch_pfit.enabled;
        if (psl_clkgate_wa)
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

        if (INTEL_GEN(dev_priv) >= 9)
                skylake_pfit_enable(intel_crtc);
        else
                ironlake_pfit_enable(intel_crtc);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(&pipe_config->base);

        /*
         * Display WA #1153: enable hardware to bypass the alpha math
         * and rounding for per-pixel values 00 and 0xff
         *
         * NOTE(review): the read uses I915_READ but the write uses the
         * raw I915_WRITE_FW variant — confirm this asymmetry is intended.
         */
        if (INTEL_GEN(dev_priv) >= 11) {
                pipe_chicken = I915_READ(PIPE_CHICKEN(pipe));
                if (!(pipe_chicken & PER_PIXEL_ALPHA_BYPASS_EN))
                        I915_WRITE_FW(PIPE_CHICKEN(pipe),
                                      pipe_chicken | PER_PIXEL_ALPHA_BYPASS_EN);
        }

        intel_ddi_set_pipe_settings(pipe_config);
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_enable_transcoder_func(pipe_config);

        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(old_intel_state, pipe_config);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_pipe_mbus_enable(intel_crtc);

        /* XXX: Do the pipe assertions at the right place for BXT DSI. */
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_enable_pipe(pipe_config);

        if (intel_crtc->config->has_pch_encoder)
                lpt_pch_enable(old_intel_state, pipe_config);

        if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
                intel_ddi_set_vc_payload_alloc(pipe_config, true);

        assert_vblank_disabled(crtc);
        drm_crtc_vblank_on(crtc);

        intel_encoders_enable(crtc, pipe_config, old_state);

        /* Undo the WA #1180 clock gating change after one full frame. */
        if (psl_clkgate_wa) {
                intel_wait_for_vblank(dev_priv, pipe);
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
        }

        /* If we change the relative order between pipe/planes enabling, we need
         * to change the workaround. */
        hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
        if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
                intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
                intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
        }
}
5752
5753 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
5754 {
5755         struct drm_device *dev = crtc->base.dev;
5756         struct drm_i915_private *dev_priv = to_i915(dev);
5757         int pipe = crtc->pipe;
5758
5759         /* To avoid upsetting the power well on haswell only disable the pfit if
5760          * it's in use. The hw state code will make sure we get this right. */
5761         if (force || crtc->config->pch_pfit.enabled) {
5762                 I915_WRITE(PF_CTL(pipe), 0);
5763                 I915_WRITE(PF_WIN_POS(pipe), 0);
5764                 I915_WRITE(PF_WIN_SZ(pipe), 0);
5765         }
5766 }
5767
/*
 * Modeset disable sequence for an ILK/SNB/IVB (PCH-split) pipe:
 * encoders, vblank, pipe, pfit, FDI, then the PCH transcoder and PLLs.
 * The teardown order mirrors the enable sequence in reverse and must
 * not be reordered casually.
 */
static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
                                  struct drm_atomic_state *old_state)
{
        struct drm_crtc *crtc = old_crtc_state->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;

        /*
         * Sometimes spurious CPU pipe underruns happen when the
         * pipe is already disabled, but FDI RX/TX is still enabled.
         * Happens at least with VGA+HDMI cloning. Suppress them.
         */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        intel_encoders_disable(crtc, old_crtc_state, old_state);

        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);

        intel_disable_pipe(old_crtc_state);

        ironlake_pfit_disable(intel_crtc, false);

        if (intel_crtc->config->has_pch_encoder)
                ironlake_fdi_disable(crtc);

        intel_encoders_post_disable(crtc, old_crtc_state, old_state);

        if (intel_crtc->config->has_pch_encoder) {
                ironlake_disable_pch_transcoder(dev_priv, pipe);

                /* CPT needs the DP transcoder routing and DPLL_SEL cleared too. */
                if (HAS_PCH_CPT(dev_priv)) {
                        i915_reg_t reg;
                        u32 temp;

                        /* disable TRANS_DP_CTL */
                        reg = TRANS_DP_CTL(pipe);
                        temp = I915_READ(reg);
                        temp &= ~(TRANS_DP_OUTPUT_ENABLE |
                                  TRANS_DP_PORT_SEL_MASK);
                        temp |= TRANS_DP_PORT_SEL_NONE;
                        I915_WRITE(reg, temp);

                        /* disable DPLL_SEL */
                        temp = I915_READ(PCH_DPLL_SEL);
                        temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
                        I915_WRITE(PCH_DPLL_SEL, temp);
                }

                ironlake_fdi_pll_disable(intel_crtc);
        }

        /* Re-arm the underrun reporting suppressed at the top of this function. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5826
/*
 * Modeset disable sequence for a HSW+ (DDI) pipe: encoders, vblank,
 * pipe, MST VC payload, DDI transcoder function, scaler/pfit, then
 * post-disable encoder hooks and (ICL+) PLL/port unmapping. The order
 * mirrors haswell_crtc_enable() in reverse and must not be reordered
 * casually.
 */
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
                                 struct drm_atomic_state *old_state)
{
        struct drm_crtc *crtc = old_crtc_state->base.crtc;
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

        intel_encoders_disable(crtc, old_crtc_state, old_state);

        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);

        /* XXX: Do the pipe assertions at the right place for BXT DSI. */
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_disable_pipe(old_crtc_state);

        if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
                intel_ddi_set_vc_payload_alloc(old_crtc_state, false);

        if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_disable_transcoder_func(old_crtc_state);

        if (INTEL_GEN(dev_priv) >= 9)
                skylake_scaler_disable(intel_crtc);
        else
                ironlake_pfit_disable(intel_crtc, false);

        intel_encoders_post_disable(crtc, old_crtc_state, old_state);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state);
}
5860
5861 static void i9xx_pfit_enable(struct intel_crtc *crtc)
5862 {
5863         struct drm_device *dev = crtc->base.dev;
5864         struct drm_i915_private *dev_priv = to_i915(dev);
5865         struct intel_crtc_state *pipe_config = crtc->config;
5866
5867         if (!pipe_config->gmch_pfit.control)
5868                 return;
5869
5870         /*
5871          * The panel fitter should only be adjusted whilst the pipe is disabled,
5872          * according to register description and PRM.
5873          */
5874         WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5875         assert_pipe_disabled(dev_priv, crtc->pipe);
5876
5877         I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
5878         I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
5879
5880         /* Border color in case we don't scale up to the full screen. Black by
5881          * default, change to something else for debugging. */
5882         I915_WRITE(BCLRPAT(crtc->pipe), 0);
5883 }
5884
5885 bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
5886 {
5887         if (IS_ICELAKE(dev_priv))
5888                 return port >= PORT_C && port <= PORT_F;
5889
5890         return false;
5891 }
5892
5893 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
5894 {
5895         if (!intel_port_is_tc(dev_priv, port))
5896                 return PORT_TC_NONE;
5897
5898         return port - PORT_C;
5899 }
5900
5901 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
5902 {
5903         switch (port) {
5904         case PORT_A:
5905                 return POWER_DOMAIN_PORT_DDI_A_LANES;
5906         case PORT_B:
5907                 return POWER_DOMAIN_PORT_DDI_B_LANES;
5908         case PORT_C:
5909                 return POWER_DOMAIN_PORT_DDI_C_LANES;
5910         case PORT_D:
5911                 return POWER_DOMAIN_PORT_DDI_D_LANES;
5912         case PORT_E:
5913                 return POWER_DOMAIN_PORT_DDI_E_LANES;
5914         case PORT_F:
5915                 return POWER_DOMAIN_PORT_DDI_F_LANES;
5916         default:
5917                 MISSING_CASE(port);
5918                 return POWER_DOMAIN_PORT_OTHER;
5919         }
5920 }
5921
5922 static u64 get_crtc_power_domains(struct drm_crtc *crtc,
5923                                   struct intel_crtc_state *crtc_state)
5924 {
5925         struct drm_device *dev = crtc->dev;
5926         struct drm_i915_private *dev_priv = to_i915(dev);
5927         struct drm_encoder *encoder;
5928         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5929         enum pipe pipe = intel_crtc->pipe;
5930         u64 mask;
5931         enum transcoder transcoder = crtc_state->cpu_transcoder;
5932
5933         if (!crtc_state->base.active)
5934                 return 0;
5935
5936         mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
5937         mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
5938         if (crtc_state->pch_pfit.enabled ||
5939             crtc_state->pch_pfit.force_thru)
5940                 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
5941
5942         drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
5943                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5944
5945                 mask |= BIT_ULL(intel_encoder->power_domain);
5946         }
5947
5948         if (HAS_DDI(dev_priv) && crtc_state->has_audio)
5949                 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
5950
5951         if (crtc_state->shared_dpll)
5952                 mask |= BIT_ULL(POWER_DOMAIN_PLLS);
5953
5954         return mask;
5955 }
5956
5957 static u64
5958 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
5959                                struct intel_crtc_state *crtc_state)
5960 {
5961         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
5962         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5963         enum intel_display_power_domain domain;
5964         u64 domains, new_domains, old_domains;
5965
5966         old_domains = intel_crtc->enabled_power_domains;
5967         intel_crtc->enabled_power_domains = new_domains =
5968                 get_crtc_power_domains(crtc, crtc_state);
5969
5970         domains = new_domains & ~old_domains;
5971
5972         for_each_power_domain(domain, domains)
5973                 intel_display_power_get(dev_priv, domain);
5974
5975         return old_domains & ~new_domains;
5976 }
5977
5978 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
5979                                       u64 domains)
5980 {
5981         enum intel_display_power_domain domain;
5982
5983         for_each_power_domain(domain, domains)
5984                 intel_display_power_put(dev_priv, domain);
5985 }
5986
5987 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
5988                                    struct drm_atomic_state *old_state)
5989 {
5990         struct intel_atomic_state *old_intel_state =
5991                 to_intel_atomic_state(old_state);
5992         struct drm_crtc *crtc = pipe_config->base.crtc;
5993         struct drm_device *dev = crtc->dev;
5994         struct drm_i915_private *dev_priv = to_i915(dev);
5995         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5996         int pipe = intel_crtc->pipe;
5997
5998         if (WARN_ON(intel_crtc->active))
5999                 return;
6000
6001         if (intel_crtc_has_dp_encoder(intel_crtc->config))
6002                 intel_dp_set_m_n(intel_crtc, M1_N1);
6003
6004         intel_set_pipe_timings(intel_crtc);
6005         intel_set_pipe_src_size(intel_crtc);
6006
6007         if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
6008                 struct drm_i915_private *dev_priv = to_i915(dev);
6009
6010                 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6011                 I915_WRITE(CHV_CANVAS(pipe), 0);
6012         }
6013
6014         i9xx_set_pipeconf(intel_crtc);
6015
6016         intel_crtc->active = true;
6017
6018         intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6019
6020         intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
6021
6022         if (IS_CHERRYVIEW(dev_priv)) {
6023                 chv_prepare_pll(intel_crtc, intel_crtc->config);
6024                 chv_enable_pll(intel_crtc, intel_crtc->config);
6025         } else {
6026                 vlv_prepare_pll(intel_crtc, intel_crtc->config);
6027                 vlv_enable_pll(intel_crtc, intel_crtc->config);
6028         }
6029
6030         intel_encoders_pre_enable(crtc, pipe_config, old_state);
6031
6032         i9xx_pfit_enable(intel_crtc);
6033
6034         intel_color_load_luts(&pipe_config->base);
6035
6036         dev_priv->display.initial_watermarks(old_intel_state,
6037                                              pipe_config);
6038         intel_enable_pipe(pipe_config);
6039
6040         assert_vblank_disabled(crtc);
6041         drm_crtc_vblank_on(crtc);
6042
6043         intel_encoders_enable(crtc, pipe_config, old_state);
6044 }
6045
6046 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
6047 {
6048         struct drm_device *dev = crtc->base.dev;
6049         struct drm_i915_private *dev_priv = to_i915(dev);
6050
6051         I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
6052         I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
6053 }
6054
/*
 * Modeset enable sequence for a gen2-4 (pre-ILK, non-VLV/CHV) pipe:
 * PLL dividers, timings, pipeconf, encoder pre-enable, PLL, pfit, LUT,
 * watermarks, pipe enable, then the encoder ->enable() hooks. The order
 * follows the hardware modeset sequence and must not be reordered
 * casually.
 */
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
                             struct drm_atomic_state *old_state)
{
        struct intel_atomic_state *old_intel_state =
                to_intel_atomic_state(old_state);
        struct drm_crtc *crtc = pipe_config->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;

        /* Enabling an already-active crtc is a driver bug; bail out. */
        if (WARN_ON(intel_crtc->active))
                return;

        i9xx_set_pll_dividers(intel_crtc);

        if (intel_crtc_has_dp_encoder(intel_crtc->config))
                intel_dp_set_m_n(intel_crtc, M1_N1);

        intel_set_pipe_timings(intel_crtc);
        intel_set_pipe_src_size(intel_crtc);

        i9xx_set_pipeconf(intel_crtc);

        intel_crtc->active = true;

        /* Gen2 has no CPU FIFO underrun reporting to arm. */
        if (!IS_GEN2(dev_priv))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

        intel_encoders_pre_enable(crtc, pipe_config, old_state);

        i9xx_enable_pll(intel_crtc, pipe_config);

        i9xx_pfit_enable(intel_crtc);

        intel_color_load_luts(&pipe_config->base);

        /* Atomic watermarks when the platform supports them, legacy path otherwise. */
        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(old_intel_state,
                                                     intel_crtc->config);
        else
                intel_update_watermarks(intel_crtc);
        intel_enable_pipe(pipe_config);

        assert_vblank_disabled(crtc);
        drm_crtc_vblank_on(crtc);

        intel_encoders_enable(crtc, pipe_config, old_state);
}
6104
6105 static void i9xx_pfit_disable(struct intel_crtc *crtc)
6106 {
6107         struct drm_device *dev = crtc->base.dev;
6108         struct drm_i915_private *dev_priv = to_i915(dev);
6109
6110         if (!crtc->config->gmch_pfit.control)
6111                 return;
6112
6113         assert_pipe_disabled(dev_priv, crtc->pipe);
6114
6115         DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
6116                          I915_READ(PFIT_CONTROL));
6117         I915_WRITE(PFIT_CONTROL, 0);
6118 }
6119
/*
 * Modeset disable sequence for a gen2-4 / VLV / CHV pipe: encoders,
 * vblank, pipe, pfit, post-disable hooks, PLL, then post-PLL hooks.
 * The teardown order mirrors the enable sequence in reverse and must
 * not be reordered casually.
 */
static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
                              struct drm_atomic_state *old_state)
{
        struct drm_crtc *crtc = old_crtc_state->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;

        /*
         * On gen2 planes are double buffered but the pipe isn't, so we must
         * wait for planes to fully turn off before disabling the pipe.
         */
        if (IS_GEN2(dev_priv))
                intel_wait_for_vblank(dev_priv, pipe);

        intel_encoders_disable(crtc, old_crtc_state, old_state);

        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);

        intel_disable_pipe(old_crtc_state);

        i9xx_pfit_disable(intel_crtc);

        intel_encoders_post_disable(crtc, old_crtc_state, old_state);

        /* DSI keeps its own PLL; only disable the display PLL otherwise. */
        if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
                if (IS_CHERRYVIEW(dev_priv))
                        chv_disable_pll(dev_priv, pipe);
                else if (IS_VALLEYVIEW(dev_priv))
                        vlv_disable_pll(dev_priv, pipe);
                else
                        i9xx_disable_pll(intel_crtc);
        }

        intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);

        /* Gen2 has no CPU FIFO underrun reporting to disarm. */
        if (!IS_GEN2(dev_priv))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

        if (!dev_priv->display.initial_watermarks)
                intel_update_watermarks(intel_crtc);

        /* clock the pipe down to 640x480@60 to potentially save power */
        if (IS_I830(dev_priv))
                i830_enable_pipe(dev_priv, pipe);
}
6168
/*
 * Force a crtc off outside of a full atomic commit, e.g. to sanitize hw
 * state at boot. All required modeset locks must already be held via @ctx.
 * Disables the planes and the crtc, then scrubs the software bookkeeping
 * (masks, encoder links, power domain refs) to match.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	u64 domains;
	struct drm_atomic_state *state;
	struct intel_crtc_state *crtc_state;
	int ret;

	if (!intel_crtc->active)
		return;

	/* Turn off any plane that is still visible on this crtc. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->base.visible)
			intel_plane_disable_noatomic(intel_crtc, plane);
	}

	/* A throwaway atomic state is needed to drive the disable hook. */
	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	ret = drm_atomic_add_affected_connectors(state, crtc);

	WARN_ON(IS_ERR(crtc_state) || ret);

	dev_priv->display.crtc_disable(crtc_state, state);

	drm_atomic_state_put(state);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.id, crtc->name);

	/* Bring the software state in line with the now-off hardware. */
	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
	crtc->state->active = false;
	intel_crtc->active = false;
	crtc->enabled = false;
	crtc->state->connector_mask = 0;
	crtc->state->encoder_mask = 0;

	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(intel_crtc);
	intel_disable_shared_dpll(intel_crtc);

	/* Release every power domain reference this crtc was holding. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
	dev_priv->min_cdclk[intel_crtc->pipe] = 0;
	dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
}
6238
6239 /*
6240  * turn all crtc's off, but do not adjust state
6241  * This has to be paired with a call to intel_modeset_setup_hw_state.
6242  */
6243 int intel_display_suspend(struct drm_device *dev)
6244 {
6245         struct drm_i915_private *dev_priv = to_i915(dev);
6246         struct drm_atomic_state *state;
6247         int ret;
6248
6249         state = drm_atomic_helper_suspend(dev);
6250         ret = PTR_ERR_OR_ZERO(state);
6251         if (ret)
6252                 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6253         else
6254                 dev_priv->modeset_restore_state = state;
6255         return ret;
6256 }
6257
/* Release a drm encoder and the intel_encoder wrapping it. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
6265
/* Cross check the actual hw state with our own modeset state tracking (and its
 * internal consistency). Pure verification: emits I915_STATE_WARNs, never
 * modifies any state. */
static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;

		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->active,
		      "connector is active, but attached crtc isn't\n");

		/* MST connectors route through a fake encoder; skip the checks. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc_state && crtc_state->active,
			"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
6304
/*
 * Allocate and install the initial atomic state for @connector.
 * Returns 0 on success, -ENOMEM if the state allocation fails.
 */
int intel_connector_init(struct intel_connector *connector)
{
	struct intel_digital_connector_state *conn_state;

	/*
	 * Allocate enough memory to hold intel_digital_connector_state,
	 * This might be a few bytes too many, but for connectors that don't
	 * need it we'll free the state and allocate a smaller one on the first
	 * successful commit anyway.
	 */
	conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
	if (!conn_state)
		return -ENOMEM;

	__drm_atomic_helper_connector_reset(&connector->base,
					    &conn_state->base);

	return 0;
}
6324
6325 struct intel_connector *intel_connector_alloc(void)
6326 {
6327         struct intel_connector *connector;
6328
6329         connector = kzalloc(sizeof *connector, GFP_KERNEL);
6330         if (!connector)
6331                 return NULL;
6332
6333         if (intel_connector_init(connector) < 0) {
6334                 kfree(connector);
6335                 return NULL;
6336         }
6337
6338         return connector;
6339 }
6340
6341 /*
6342  * Free the bits allocated by intel_connector_alloc.
6343  * This should only be used after intel_connector_alloc has returned
6344  * successfully, and before drm_connector_init returns successfully.
6345  * Otherwise the destroy callbacks for the connector and the state should
6346  * take care of proper cleanup/free
6347  */
6348 void intel_connector_free(struct intel_connector *connector)
6349 {
6350         kfree(to_intel_digital_connector_state(connector->base.state));
6351         kfree(connector);
6352 }
6353
6354 /* Simple connector->get_hw_state implementation for encoders that support only
6355  * one connector and no cloning and hence the encoder state determines the state
6356  * of the connector. */
6357 bool intel_connector_get_hw_state(struct intel_connector *connector)
6358 {
6359         enum pipe pipe = 0;
6360         struct intel_encoder *encoder = connector->encoder;
6361
6362         return encoder->get_hw_state(encoder, &pipe);
6363 }
6364
6365 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6366 {
6367         if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6368                 return crtc_state->fdi_lanes;
6369
6370         return 0;
6371 }
6372
/*
 * Validate the FDI lane count requested for @pipe against platform limits
 * and, on 3-pipe Ivybridge, against the lanes consumed by the pipe it
 * shares an FDI link with. Returns 0 if the config fits, -EINVAL if not,
 * or a PTR_ERR from looking up the sibling pipe's state.
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW cap out at 2 lanes regardless of pipe. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* Two-pipe parts have no FDI sharing constraints beyond the above. */
	if (INTEL_INFO(dev_priv)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		/* Pipe B may take >2 lanes only while pipe C uses none. */
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		/* Pipe C is limited to 2 lanes, and only if pipe B uses <= 2. */
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
6444
#define RETRY 1
/*
 * Compute the FDI lane count and M/N values for @pipe_config. If the lane
 * check fails, retry with a reduced pipe bpp (down to 6bpc) and report
 * RETRY so the caller recomputes dependent state; otherwise returns 0 or
 * the error from ironlake_check_fdi_lanes().
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false);

	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		/* Drop one bpc (3 bits of pipe bpp) and try again. */
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
6490
/*
 * Can IPS (Intermediate Pixel Storage) be used at all with this crtc
 * state? Checks only static constraints; dynamic per-commit checks live
 * in hsw_compute_ips_config().
 */
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* IPS only exists on ULT machines and is tied to pipe A. */
	if (!hsw_crtc_supports_ips(crtc))
		return false;

	/* Respect the enable_ips module parameter. */
	if (!i915_modparams.enable_ips)
		return false;

	/* IPS is limited to 8bpc (24bpp) pipes. */
	if (crtc_state->pipe_bpp > 24)
		return false;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS
	 * ends up being a net win — TODO confirm.
	 */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
		return false;

	return true;
}
6519
6520 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
6521 {
6522         struct drm_i915_private *dev_priv =
6523                 to_i915(crtc_state->base.crtc->dev);
6524         struct intel_atomic_state *intel_state =
6525                 to_intel_atomic_state(crtc_state->base.state);
6526
6527         if (!hsw_crtc_state_ips_capable(crtc_state))
6528                 return false;
6529
6530         if (crtc_state->ips_force_disable)
6531                 return false;
6532
6533         /* IPS should be fine as long as at least one plane is enabled. */
6534         if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
6535                 return false;
6536
6537         /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
6538         if (IS_BROADWELL(dev_priv) &&
6539             crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
6540                 return false;
6541
6542         return true;
6543 }
6544
6545 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6546 {
6547         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6548
6549         /* GDG double wide on either pipe, otherwise pipe A only */
6550         return INTEL_GEN(dev_priv) < 4 &&
6551                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6552 }
6553
6554 static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
6555 {
6556         uint32_t pixel_rate;
6557
6558         pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
6559
6560         /*
6561          * We only use IF-ID interlacing. If we ever use
6562          * PF-ID we'll need to adjust the pixel_rate here.
6563          */
6564
6565         if (pipe_config->pch_pfit.enabled) {
6566                 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
6567                 uint32_t pfit_size = pipe_config->pch_pfit.size;
6568
6569                 pipe_w = pipe_config->pipe_src_w;
6570                 pipe_h = pipe_config->pipe_src_h;
6571
6572                 pfit_w = (pfit_size >> 16) & 0xFFFF;
6573                 pfit_h = pfit_size & 0xFFFF;
6574                 if (pipe_w < pfit_w)
6575                         pipe_w = pfit_w;
6576                 if (pipe_h < pfit_h)
6577                         pipe_h = pfit_h;
6578
6579                 if (WARN_ON(!pfit_w || !pfit_h))
6580                         return pixel_rate;
6581
6582                 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
6583                                      pfit_w * pfit_h);
6584         }
6585
6586         return pixel_rate;
6587 }
6588
6589 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
6590 {
6591         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
6592
6593         if (HAS_GMCH_DISPLAY(dev_priv))
6594                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
6595                 crtc_state->pixel_rate =
6596                         crtc_state->base.adjusted_mode.crtc_clock;
6597         else
6598                 crtc_state->pixel_rate =
6599                         ilk_pipe_pixel_rate(crtc_state);
6600 }
6601
/*
 * Validate and finish the crtc configuration: clock limits (including
 * double wide mode on old parts), YCBCR420/CTM exclusivity, even-width
 * requirements, the Cantiga+ hsync workaround, pixel rate, and FDI setup
 * for PCH-attached outputs. Returns 0 or a negative error.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if (pipe_config->ycbcr420 && pipe_config->base.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev)) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
6674
/*
 * Halve M and N together until both fit in the hardware's M/N field
 * width; preserves the ratio to within rounding.
 */
static void
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
{
	while (*num > DATA_LINK_M_N_MASK ||
	       *den > DATA_LINK_M_N_MASK) {
		*num >>= 1;
		*den >>= 1;
	}
}
6684
/*
 * Derive the hardware M/N pair approximating the ratio m/n. N is rounded
 * up to a power of two (capped at DATA_LINK_N_MAX) and M scaled to match.
 */
static void compute_m_n(unsigned int m, unsigned int n,
			uint32_t *ret_m, uint32_t *ret_n,
			bool reduce_m_n)
{
	/*
	 * Reduce M/N as much as possible without loss in precision. Several DP
	 * dongles in particular seem to be fussy about too large *link* M/N
	 * values. The passed in values are more likely to have the least
	 * significant bits zero than M after rounding below, so do this first.
	 */
	if (reduce_m_n) {
		while ((m & 1) == 0 && (n & 1) == 0) {
			m >>= 1;
			n >>= 1;
		}
	}

	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}
6706
/*
 * Compute the data (gmch) and link M/N values for a display link with the
 * given bpp, lane count and clocks. @reduce_m_n trades precision for
 * smaller values, which some DP dongles require.
 */
void
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n,
		       bool reduce_m_n)
{
	m_n->tu = 64;

	/* Data M/N: payload bandwidth vs total link bandwidth (8b/10b). */
	compute_m_n(bits_per_pixel * pixel_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n,
		    reduce_m_n);

	/* Link M/N: pixel clock vs link symbol clock. */
	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n,
		    reduce_m_n);
}
6724
6725 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
6726 {
6727         if (i915_modparams.panel_use_ssc >= 0)
6728                 return i915_modparams.panel_use_ssc != 0;
6729         return dev_priv->vbt.lvds_use_ssc
6730                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
6731 }
6732
6733 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
6734 {
6735         return (1 << dpll->n) << 16 | dpll->m2;
6736 }
6737
6738 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
6739 {
6740         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
6741 }
6742
6743 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
6744                                      struct intel_crtc_state *crtc_state,
6745                                      struct dpll *reduced_clock)
6746 {
6747         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6748         u32 fp, fp2 = 0;
6749
6750         if (IS_PINEVIEW(dev_priv)) {
6751                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
6752                 if (reduced_clock)
6753                         fp2 = pnv_dpll_compute_fp(reduced_clock);
6754         } else {
6755                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
6756                 if (reduced_clock)
6757                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
6758         }
6759
6760         crtc_state->dpll_hw_state.fp0 = fp;
6761
6762         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
6763             reduced_clock) {
6764                 crtc_state->dpll_hw_state.fp1 = fp2;
6765         } else {
6766                 crtc_state->dpll_hw_state.fp1 = fp;
6767         }
6768 }
6769
/*
 * Recalibrate the PLL B opamp via DPIO. The exact read-modify-write
 * sequence below follows the hardware procedure; do not reorder.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/* Clear the forced opamp value again once calibration is triggered. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
6798
/* Program the PCH transcoder data and link M/N registers for this pipe. */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
6811
/*
 * Program the CPU transcoder M1/N1 (and, where supported and DRRS is in
 * use, M2/N2) registers. Pre-gen5 parts use the per-pipe G4X registers.
 */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
		 * for gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily accessed).
		 */
		if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
		    INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
6844
/*
 * Program the selected DP M/N divider set (M1_N1 or M2_N2) into the
 * transcoder.
 *
 * NOTE(review): the PCH path below always programs dp_m_n and ignores the
 * m_n selection — presumably DRRS (M2_N2) never occurs together with a
 * PCH encoder; confirm against callers.
 */
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
{
	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;

	if (m_n == M1_N1) {
		dp_m_n = &crtc->config->dp_m_n;
		dp_m2_n2 = &crtc->config->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc->config->dp_m2_n2;
	} else {
		DRM_ERROR("Unsupported divider value\n");
		return;
	}

	if (crtc->config->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
}
6869
/* Compute the VLV DPLL and DPLL_MD register values for this crtc state. */
static void vlv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* Pipes other than A additionally need the CRI clock. */
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
			DPLL_EXT_BUFFER_ENABLE_VLV;

	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
6886
/* Compute the CHV DPLL and DPLL_MD register values for this crtc state. */
static void chv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* Pipes other than A additionally need the CRI clock. */
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;

	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
6902
6903 static void vlv_prepare_pll(struct intel_crtc *crtc,
6904                             const struct intel_crtc_state *pipe_config)
6905 {
6906         struct drm_device *dev = crtc->base.dev;
6907         struct drm_i915_private *dev_priv = to_i915(dev);
6908         enum pipe pipe = crtc->pipe;
6909         u32 mdiv;
6910         u32 bestn, bestm1, bestm2, bestp1, bestp2;
6911         u32 coreclk, reg_val;
6912
6913         /* Enable Refclk */
6914         I915_WRITE(DPLL(pipe),
6915                    pipe_config->dpll_hw_state.dpll &
6916                    ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
6917
6918         /* No need to actually set up the DPLL with DSI */
6919         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
6920                 return;
6921
6922         mutex_lock(&dev_priv->sb_lock);
6923
6924         bestn = pipe_config->dpll.n;
6925         bestm1 = pipe_config->dpll.m1;
6926         bestm2 = pipe_config->dpll.m2;
6927         bestp1 = pipe_config->dpll.p1;
6928         bestp2 = pipe_config->dpll.p2;
6929
6930         /* See eDP HDMI DPIO driver vbios notes doc */
6931
6932         /* PLL B needs special handling */
6933         if (pipe == PIPE_B)
6934                 vlv_pllb_recal_opamp(dev_priv, pipe);
6935
6936         /* Set up Tx target for periodic Rcomp update */
6937         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
6938
6939         /* Disable target IRef on PLL */
6940         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
6941         reg_val &= 0x00ffffff;
6942         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
6943
6944         /* Disable fast lock */
6945         vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
6946
6947         /* Set idtafcrecal before PLL is enabled */
6948         mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
6949         mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
6950         mdiv |= ((bestn << DPIO_N_SHIFT));
6951         mdiv |= (1 << DPIO_K_SHIFT);
6952
6953         /*
6954          * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
6955          * but we don't support that).
6956          * Note: don't use the DAC post divider as it seems unstable.
6957          */
6958         mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
6959         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
6960
6961         mdiv |= DPIO_ENABLE_CALIBRATION;
6962         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
6963
6964         /* Set HBR and RBR LPF coefficients */
6965         if (pipe_config->port_clock == 162000 ||
6966             intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
6967             intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
6968                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
6969                                  0x009f0003);
6970         else
6971                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
6972                                  0x00d0000f);
6973
6974         if (intel_crtc_has_dp_encoder(pipe_config)) {
6975                 /* Use SSC source */
6976                 if (pipe == PIPE_A)
6977                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
6978                                          0x0df40000);
6979                 else
6980                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
6981                                          0x0df70000);
6982         } else { /* HDMI or VGA */
6983                 /* Use bend source */
6984                 if (pipe == PIPE_A)
6985                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
6986                                          0x0df70000);
6987                 else
6988                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
6989                                          0x0df40000);
6990         }
6991
6992         coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
6993         coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
6994         if (intel_crtc_has_dp_encoder(crtc->config))
6995                 coreclk |= 0x01000000;
6996         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
6997
6998         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
6999         mutex_unlock(&dev_priv->sb_lock);
7000 }
7001
/*
 * Program the CHV DPIO PHY (P/K dividers, integer + fractional M2,
 * digital lock detect threshold, loop filter, AFC recal) for the PLL
 * described by @pipe_config. Must precede chv_enable_pll(). Takes
 * sb_lock around the DPIO sideband accesses.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* m2 is stored as (integer part << 22) | (22-bit fractional part) */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	/* Coarse threshold is only used when no fractional part is set. */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter */
	/*
	 * Coefficients are selected by VCO frequency band; the magic values
	 * presumably come from the CHV PHY tuning documentation — they are
	 * not derivable from this code alone.
	 */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	/* TDC target count (tri-buffer calibration) */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}
7106
7107 /**
7108  * vlv_force_pll_on - forcibly enable just the PLL
7109  * @dev_priv: i915 private structure
7110  * @pipe: pipe PLL to enable
7111  * @dpll: PLL configuration
7112  *
7113  * Enable the PLL for @pipe using the supplied @dpll config. To be used
7114  * in cases where we need the PLL enabled even when @pipe is not going to
7115  * be enabled.
7116  */
7117 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
7118                      const struct dpll *dpll)
7119 {
7120         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
7121         struct intel_crtc_state *pipe_config;
7122
7123         pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7124         if (!pipe_config)
7125                 return -ENOMEM;
7126
7127         pipe_config->base.crtc = &crtc->base;
7128         pipe_config->pixel_multiplier = 1;
7129         pipe_config->dpll = *dpll;
7130
7131         if (IS_CHERRYVIEW(dev_priv)) {
7132                 chv_compute_dpll(crtc, pipe_config);
7133                 chv_prepare_pll(crtc, pipe_config);
7134                 chv_enable_pll(crtc, pipe_config);
7135         } else {
7136                 vlv_compute_dpll(crtc, pipe_config);
7137                 vlv_prepare_pll(crtc, pipe_config);
7138                 vlv_enable_pll(crtc, pipe_config);
7139         }
7140
7141         kfree(pipe_config);
7142
7143         return 0;
7144 }
7145
7146 /**
7147  * vlv_force_pll_off - forcibly disable just the PLL
7148  * @dev_priv: i915 private structure
7149  * @pipe: pipe PLL to disable
7150  *
7151  * Disable the PLL for @pipe. To be used in cases where we need
7152  * the PLL enabled even when @pipe is not going to be enabled.
7153  */
7154 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
7155 {
7156         if (IS_CHERRYVIEW(dev_priv))
7157                 chv_disable_pll(dev_priv, pipe);
7158         else
7159                 vlv_disable_pll(dev_priv, pipe);
7160 }
7161
/*
 * Compute the DPLL (and, on gen4+, DPLL_MD) register values for
 * gmch-style (i9xx-class) platforms and store them in
 * crtc_state->dpll_hw_state. Also programs the FP divider registers
 * via i9xx_update_pll_dividers().
 *
 * @reduced_clock: optional lower dot clock for LVDS downclocking
 *                 (consulted only on G4X here); may be NULL.
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* Only these platforms encode the pixel multiplier in DPLL itself. */
	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	/* SDVO/HDMI and DP all want the high speed clock mode. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		/* FPA1 carries the P1 divider for the reduced (downclocked) mode. */
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	/* Only these p2 values are representable; each has its own encoding. */
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Select the reference clock input. */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	/* gen4+ carries the pixel multiplier in the separate DPLL_MD register. */
	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
7234
7235 static void i8xx_compute_dpll(struct intel_crtc *crtc,
7236                               struct intel_crtc_state *crtc_state,
7237                               struct dpll *reduced_clock)
7238 {
7239         struct drm_device *dev = crtc->base.dev;
7240         struct drm_i915_private *dev_priv = to_i915(dev);
7241         u32 dpll;
7242         struct dpll *clock = &crtc_state->dpll;
7243
7244         i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7245
7246         dpll = DPLL_VGA_MODE_DIS;
7247
7248         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7249                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7250         } else {
7251                 if (clock->p1 == 2)
7252                         dpll |= PLL_P1_DIVIDE_BY_TWO;
7253                 else
7254                         dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7255                 if (clock->p2 == 4)
7256                         dpll |= PLL_P2_DIVIDE_BY_4;
7257         }
7258
7259         if (!IS_I830(dev_priv) &&
7260             intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
7261                 dpll |= DPLL_DVO_2X_MODE;
7262
7263         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7264             intel_panel_use_ssc(dev_priv))
7265                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7266         else
7267                 dpll |= PLL_REF_INPUT_DREFCLK;
7268
7269         dpll |= DPLL_VCO_ENABLE;
7270         crtc_state->dpll_hw_state.dpll = dpll;
7271 }
7272
/*
 * Write the pipe/transcoder timing registers (H/VTOTAL, H/VBLANK,
 * H/VSYNC, VSYNCSHIFT) from the crtc's adjusted mode. All timing
 * values are programmed minus one, per hardware convention.
 */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		/* Vsync shift for the second field, wrapped into [0, htotal). */
		if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* gen2/3 has no VSYNCSHIFT register. */
	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	/* Each register packs start/low in bits 15:0, end/high in 31:16. */
	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
7333
7334 static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
7335 {
7336         struct drm_device *dev = intel_crtc->base.dev;
7337         struct drm_i915_private *dev_priv = to_i915(dev);
7338         enum pipe pipe = intel_crtc->pipe;
7339
7340         /* pipesrc controls the size that is scaled from, which should
7341          * always be the user's requested size.
7342          */
7343         I915_WRITE(PIPESRC(pipe),
7344                    ((intel_crtc->config->pipe_src_w - 1) << 16) |
7345                    (intel_crtc->config->pipe_src_h - 1));
7346 }
7347
7348 static void intel_get_pipe_timings(struct intel_crtc *crtc,
7349                                    struct intel_crtc_state *pipe_config)
7350 {
7351         struct drm_device *dev = crtc->base.dev;
7352         struct drm_i915_private *dev_priv = to_i915(dev);
7353         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7354         uint32_t tmp;
7355
7356         tmp = I915_READ(HTOTAL(cpu_transcoder));
7357         pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7358         pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
7359         tmp = I915_READ(HBLANK(cpu_transcoder));
7360         pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7361         pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
7362         tmp = I915_READ(HSYNC(cpu_transcoder));
7363         pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7364         pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
7365
7366         tmp = I915_READ(VTOTAL(cpu_transcoder));
7367         pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7368         pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
7369         tmp = I915_READ(VBLANK(cpu_transcoder));
7370         pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7371         pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
7372         tmp = I915_READ(VSYNC(cpu_transcoder));
7373         pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7374         pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
7375
7376         if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
7377                 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7378                 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7379                 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
7380         }
7381 }
7382
7383 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7384                                     struct intel_crtc_state *pipe_config)
7385 {
7386         struct drm_device *dev = crtc->base.dev;
7387         struct drm_i915_private *dev_priv = to_i915(dev);
7388         u32 tmp;
7389
7390         tmp = I915_READ(PIPESRC(crtc->pipe));
7391         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7392         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7393
7394         pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7395         pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
7396 }
7397
7398 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7399                                  struct intel_crtc_state *pipe_config)
7400 {
7401         mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7402         mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7403         mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7404         mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
7405
7406         mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7407         mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7408         mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7409         mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
7410
7411         mode->flags = pipe_config->base.adjusted_mode.flags;
7412         mode->type = DRM_MODE_TYPE_DRIVER;
7413
7414         mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7415
7416         mode->hsync = drm_mode_hsync(mode);
7417         mode->vrefresh = drm_mode_vrefresh(mode);
7418         drm_mode_set_name(mode);
7419 }
7420
/*
 * Compute and write PIPECONF for gmch-style platforms from the current
 * crtc->config: double-wide mode, dither/bpc (g4x+), interlace mode and
 * (VLV/CHV) limited color range.
 */
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	uint32_t pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* gen2/3 and SDVO only support the field-indication variant. */
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}
7475
7476 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7477                                    struct intel_crtc_state *crtc_state)
7478 {
7479         struct drm_device *dev = crtc->base.dev;
7480         struct drm_i915_private *dev_priv = to_i915(dev);
7481         const struct intel_limit *limit;
7482         int refclk = 48000;
7483
7484         memset(&crtc_state->dpll_hw_state, 0,
7485                sizeof(crtc_state->dpll_hw_state));
7486
7487         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7488                 if (intel_panel_use_ssc(dev_priv)) {
7489                         refclk = dev_priv->vbt.lvds_ssc_freq;
7490                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7491                 }
7492
7493                 limit = &intel_limits_i8xx_lvds;
7494         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
7495                 limit = &intel_limits_i8xx_dvo;
7496         } else {
7497                 limit = &intel_limits_i8xx_dac;
7498         }
7499
7500         if (!crtc_state->clock_set &&
7501             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7502                                  refclk, NULL, &crtc_state->dpll)) {
7503                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7504                 return -EINVAL;
7505         }
7506
7507         i8xx_compute_dpll(crtc, crtc_state, NULL);
7508
7509         return 0;
7510 }
7511
7512 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7513                                   struct intel_crtc_state *crtc_state)
7514 {
7515         struct drm_device *dev = crtc->base.dev;
7516         struct drm_i915_private *dev_priv = to_i915(dev);
7517         const struct intel_limit *limit;
7518         int refclk = 96000;
7519
7520         memset(&crtc_state->dpll_hw_state, 0,
7521                sizeof(crtc_state->dpll_hw_state));
7522
7523         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7524                 if (intel_panel_use_ssc(dev_priv)) {
7525                         refclk = dev_priv->vbt.lvds_ssc_freq;
7526                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7527                 }
7528
7529                 if (intel_is_dual_link_lvds(dev))
7530                         limit = &intel_limits_g4x_dual_channel_lvds;
7531                 else
7532                         limit = &intel_limits_g4x_single_channel_lvds;
7533         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7534                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
7535                 limit = &intel_limits_g4x_hdmi;
7536         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
7537                 limit = &intel_limits_g4x_sdvo;
7538         } else {
7539                 /* The option is for other outputs */
7540                 limit = &intel_limits_i9xx_sdvo;
7541         }
7542
7543         if (!crtc_state->clock_set &&
7544             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7545                                 refclk, NULL, &crtc_state->dpll)) {
7546                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7547                 return -EINVAL;
7548         }
7549
7550         i9xx_compute_dpll(crtc, crtc_state, NULL);
7551
7552         return 0;
7553 }
7554
7555 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7556                                   struct intel_crtc_state *crtc_state)
7557 {
7558         struct drm_device *dev = crtc->base.dev;
7559         struct drm_i915_private *dev_priv = to_i915(dev);
7560         const struct intel_limit *limit;
7561         int refclk = 96000;
7562
7563         memset(&crtc_state->dpll_hw_state, 0,
7564                sizeof(crtc_state->dpll_hw_state));
7565
7566         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7567                 if (intel_panel_use_ssc(dev_priv)) {
7568                         refclk = dev_priv->vbt.lvds_ssc_freq;
7569                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7570                 }
7571
7572                 limit = &intel_limits_pineview_lvds;
7573         } else {
7574                 limit = &intel_limits_pineview_sdvo;
7575         }
7576
7577         if (!crtc_state->clock_set &&
7578             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7579                                 refclk, NULL, &crtc_state->dpll)) {
7580                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7581                 return -EINVAL;
7582         }
7583
7584         i9xx_compute_dpll(crtc, crtc_state, NULL);
7585
7586         return 0;
7587 }
7588
7589 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7590                                    struct intel_crtc_state *crtc_state)
7591 {
7592         struct drm_device *dev = crtc->base.dev;
7593         struct drm_i915_private *dev_priv = to_i915(dev);
7594         const struct intel_limit *limit;
7595         int refclk = 96000;
7596
7597         memset(&crtc_state->dpll_hw_state, 0,
7598                sizeof(crtc_state->dpll_hw_state));
7599
7600         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7601                 if (intel_panel_use_ssc(dev_priv)) {
7602                         refclk = dev_priv->vbt.lvds_ssc_freq;
7603                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7604                 }
7605
7606                 limit = &intel_limits_i9xx_lvds;
7607         } else {
7608                 limit = &intel_limits_i9xx_sdvo;
7609         }
7610
7611         if (!crtc_state->clock_set &&
7612             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7613                                  refclk, NULL, &crtc_state->dpll)) {
7614                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7615                 return -EINVAL;
7616         }
7617
7618         i9xx_compute_dpll(crtc, crtc_state, NULL);
7619
7620         return 0;
7621 }
7622
7623 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
7624                                   struct intel_crtc_state *crtc_state)
7625 {
7626         int refclk = 100000;
7627         const struct intel_limit *limit = &intel_limits_chv;
7628
7629         memset(&crtc_state->dpll_hw_state, 0,
7630                sizeof(crtc_state->dpll_hw_state));
7631
7632         if (!crtc_state->clock_set &&
7633             !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7634                                 refclk, NULL, &crtc_state->dpll)) {
7635                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7636                 return -EINVAL;
7637         }
7638
7639         chv_compute_dpll(crtc, crtc_state);
7640
7641         return 0;
7642 }
7643
7644 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
7645                                   struct intel_crtc_state *crtc_state)
7646 {
7647         int refclk = 100000;
7648         const struct intel_limit *limit = &intel_limits_vlv;
7649
7650         memset(&crtc_state->dpll_hw_state, 0,
7651                sizeof(crtc_state->dpll_hw_state));
7652
7653         if (!crtc_state->clock_set &&
7654             !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7655                                 refclk, NULL, &crtc_state->dpll)) {
7656                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7657                 return -EINVAL;
7658         }
7659
7660         vlv_compute_dpll(crtc, crtc_state);
7661
7662         return 0;
7663 }
7664
/*
 * Read back the panel fitter state for this pipe into pipe_config.
 * Leaves pipe_config->gmch_pfit untouched if the fitter is absent,
 * disabled, or attached to a different pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	uint32_t tmp;

	/* Pre-gen4 only mobile parts (except 830) have a panel fitter. */
	if (INTEL_GEN(dev_priv) <= 3 &&
	    (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
		return;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_GEN(dev_priv) < 4) {
		/* Pre-gen4 the fitter is hardwired to pipe B. */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
}
7691
/*
 * Read back the current VLV DPLL dividers over the DPIO sideband and
 * derive pipe_config->port_clock from them.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000; /* fixed 100 MHz reference, in kHz */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* Sideband accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->sb_lock);

	/* Unpack the divider fields from PLL_DW3. */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
7718
/*
 * Read back the framebuffer the BIOS/GOP left programmed on the primary
 * plane so the boot framebuffer can be inherited. On any failure
 * (plane disabled, allocation failure) plane_config->fb is left unset.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	enum pipe pipe;
	u32 val, base, offset;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to take over if the plane is not enabled. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	/*
	 * NOTE(review): ownership of this allocation appears to pass to
	 * the caller via plane_config->fb — confirm the takeover path
	 * frees it on all exits.
	 */
	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(DSPCNTR(i9xx_plane));

	/* X-tiling is the only tiling reported via DSPCNTR here. */
	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	/*
	 * Surface base/offset registers differ per generation. Note that
	 * 'offset' is read but not used further below.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = I915_READ(DSPOFFSET(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(i9xx_plane));
		else
			offset = I915_READ(DSPLINOFF(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(i9xx_plane));
	}
	plane_config->base = base;

	/* PIPESRC stores (width - 1) in 31:16 and (height - 1) in 15:0. */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(i9xx_plane));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
7794
/*
 * Read back the current CHV DPLL dividers over the DPIO sideband and
 * derive pipe_config->port_clock from them.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000; /* fixed 100 MHz reference, in kHz */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* Sideband accesses are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * m2 is a 22.22 fixed-point style value: integer part from
	 * PLL_DW0, fractional part from PLL_DW2 when enabled in PLL_DW3.
	 */
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
7828
/*
 * Read back the full hardware state of a gen2-4/VLV/CHV pipe into
 * pipe_config. Returns true if the pipe is powered and enabled and the
 * state was read, false otherwise. Takes (and releases) the pipe's
 * power domain reference for the duration of the readout.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	/* These platforms have a fixed 1:1 pipe:transcoder mapping. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Only G4X/VLV/CHV encode the pipe bpp in PIPECONF. */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* Pixel multiplier readout is generation-specific. */
	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		/*
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
		 * on 830. Filter it out here so that we don't
		 * report errors due to that.
		 */
		if (IS_I830(dev_priv))
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
7942
/*
 * Program the PCH display reference clock (PCH_DREF_CONTROL) based on
 * which outputs are present and whether SSC is in use. The sequence is
 * order-sensitive: sources are enabled/disabled one at a time with
 * settling delays, and the computed 'final' value must be reached
 * exactly (BUG_ON at the end).
 */
static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	/* IBX may use an external CK505 clock chip per VBT; later PCHs
	 * always have internal SSC capability. */
	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		u32 temp = I915_READ(PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		      has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	/* Nothing to do if the hardware already matches the target. */
	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		/* Leave SSC running if another DPLL still consumes it. */
		if (!using_ssc_source) {
			DRM_DEBUG_KMS("Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			I915_WRITE(PCH_DREF_CONTROL, val);
			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	BUG_ON(val != final);
}
8109
/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2: assert, wait for the
 * status bit, de-assert, wait for it to clear. Timeouts are logged
 * but not fatal.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	/* Assert the mPHY reset. */
	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	/* De-assert and wait for the PHY to come back. */
	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
8130
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers over the SBI sideband. The
 * offsets and values are opaque magic from the workaround/BSpec
 * programming sequence; registers come in per-lane pairs (0x20xx /
 * 0x21xx). Do not change values without the corresponding spec.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
8205
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_fdi)
{
	uint32_t reg, tmp;

	/* Sanitize contradictory parameter combinations. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	/* Enable the SSC block but keep the clock path in bypass. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Leave bypass to get spread on the output clock. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* Finally enable the clock buffer (register differs on LPT-LP). */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
8250
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	uint32_t reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Disable the clock buffer first (register differs on LPT-LP). */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	/* Then bypass and shut down the SSC block if still running. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
8276
/* Map a bend amount in steps (-50..50, multiples of 5) to a table index. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SSCDIVINTPHASE values for each supported amount of clock bending,
 * indexed via BEND_IDX(). The values are opaque magic from the
 * programming spec; do not edit.
 */
static const uint16_t sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
8302
/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	uint32_t tmp;
	int idx = BEND_IDX(steps);

	/* Only multiples of 5 within the table range are programmable. */
	if (WARN_ON(steps % 5 != 0))
		return;

	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	mutex_lock(&dev_priv->sb_lock);

	/*
	 * Half-steps (odd multiples of 5) need the dither phase pattern;
	 * full steps use no dithering.
	 */
	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	/* Program the bend value in the low word of SSCDIVINTPHASE. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
8335
8336 #undef BEND_IDX
8337
8338 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
8339 {
8340         struct intel_encoder *encoder;
8341         bool has_vga = false;
8342
8343         for_each_intel_encoder(&dev_priv->drm, encoder) {
8344                 switch (encoder->type) {
8345                 case INTEL_OUTPUT_ANALOG:
8346                         has_vga = true;
8347                         break;
8348                 default:
8349                         break;
8350                 }
8351         }
8352
8353         if (has_vga) {
8354                 lpt_bend_clkout_dp(dev_priv, 0);
8355                 lpt_enable_clkout_dp(dev_priv, true, true);
8356         } else {
8357                 lpt_disable_clkout_dp(dev_priv);
8358         }
8359 }
8360
/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	/* IBX/CPT use the DREF control sequence... */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		ironlake_init_pch_refclk(dev_priv);
		return;
	}

	/* ...while LPT programs CLKOUT_DP over the sideband. */
	if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev_priv);
}
8371
/*
 * Program PIPECONF for an Ironlake-class pipe from the current crtc
 * state: bpc, dithering, interlace mode and color range.
 */
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t val;

	val = 0;

	switch (intel_crtc->config->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	if (intel_crtc->config->limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(pipe), val);
	POSTING_READ(PIPECONF(pipe));
}
8413
8414 static void haswell_set_pipeconf(struct drm_crtc *crtc)
8415 {
8416         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
8417         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8418         enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
8419         u32 val = 0;
8420
8421         if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
8422                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8423
8424         if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8425                 val |= PIPECONF_INTERLACED_ILK;
8426         else
8427                 val |= PIPECONF_PROGRESSIVE;
8428
8429         I915_WRITE(PIPECONF(cpu_transcoder), val);
8430         POSTING_READ(PIPECONF(cpu_transcoder));
8431 }
8432
/*
 * Program the PIPEMISC register (BDW and gen9+ only): dithering depth and
 * enable, plus the YCbCr 4:2:0 output path when requested by the state.
 */
static void haswell_set_pipemisc(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *config = intel_crtc->config;

	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
		u32 val = 0;

		/* Select the dithering depth matching the pipe bpp. */
		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			val |= PIPEMISC_DITHER_6_BPC;
			break;
		case 24:
			val |= PIPEMISC_DITHER_8_BPC;
			break;
		case 30:
			val |= PIPEMISC_DITHER_10_BPC;
			break;
		case 36:
			val |= PIPEMISC_DITHER_12_BPC;
			break;
		default:
			/* Case prevented by pipe_config_set_bpp. */
			BUG();
		}

		if (intel_crtc->config->dither)
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

		/* YCbCr 4:2:0 output with full blending of the planes. */
		if (config->ycbcr420) {
			val |= PIPEMISC_OUTPUT_COLORSPACE_YUV |
				PIPEMISC_YUV420_ENABLE |
				PIPEMISC_YUV420_MODE_FULL_BLEND;
		}

		I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
	}
}
8472
8473 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8474 {
8475         /*
8476          * Account for spread spectrum to avoid
8477          * oversubscribing the link. Max center spread
8478          * is 2.5%; use 5% for safety's sake.
8479          */
8480         u32 bps = target_clock * bpp * 21 / 20;
8481         return DIV_ROUND_UP(bps, link_bw * 8);
8482 }
8483
8484 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8485 {
8486         return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
8487 }
8488
/*
 * Assemble the ILK DPLL, FP0 and FP1 register values for @crtc_state and
 * store them in crtc_state->dpll_hw_state. @reduced_clock, when non-NULL,
 * provides the divider settings used for FP1 (the reduced/downclocked
 * frequency); otherwise FP1 mirrors FP0.
 */
static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/* 100 MHz SSC LVDS and IBX dual-link LVDS use a larger factor. */
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		/* Same CB tuning criterion as for fp above. */
		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	/* Encode the P2 post divider. */
	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* Reference input: spread spectrum for SSC LVDS, DREFCLK otherwise. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
8590
/*
 * Compute the DPLL settings for an ILK-family pipe and reserve a shared
 * PCH PLL for it.
 *
 * Returns 0 on success, -EINVAL if no suitable divider settings or no
 * free shared DPLL could be found.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 120000;	/* default reference clock, kHz */

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	/* Pick divider limits based on output type, link config and refclk. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
				      dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		if (intel_is_dual_link_lvds(dev)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else {
		limit = &intel_limits_ironlake_dac;
	}

	/* Respect pre-set dividers (clock_set), otherwise search for them. */
	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ironlake_compute_dpll(crtc, crtc_state, NULL);

	if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) {
		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
				 pipe_name(crtc->pipe));
		return -EINVAL;
	}

	return 0;
}
8645
/*
 * Read back the link M/N ratio programmed into the PCH transcoder
 * registers for @crtc's pipe. The TU size is stored in the high bits of
 * the DATA_M1 register and converted back to its 1-based value.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
8661
/*
 * Read back the link M/N values from the CPU transcoder (gen5+) or the
 * G4X pipe registers (gen < 5). When @m2_n2 is non-NULL, the secondary
 * M2/N2 set is additionally read on platforms that have it and use DRRS.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		/* TU size is stored 0-based in the register. */
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
		 * gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily read).
		 */
		if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
			crtc->config->has_drrs) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
8702
8703 void intel_dp_get_m_n(struct intel_crtc *crtc,
8704                       struct intel_crtc_state *pipe_config)
8705 {
8706         if (pipe_config->has_pch_encoder)
8707                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
8708         else
8709                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
8710                                              &pipe_config->dp_m_n,
8711                                              &pipe_config->dp_m2_n2);
8712 }
8713
/*
 * Read back the FDI link M/N values from the CPU transcoder. FDI has no
 * secondary M2/N2 set, hence the NULL.
 */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
8720
8721 static void skylake_get_pfit_config(struct intel_crtc *crtc,
8722                                     struct intel_crtc_state *pipe_config)
8723 {
8724         struct drm_device *dev = crtc->base.dev;
8725         struct drm_i915_private *dev_priv = to_i915(dev);
8726         struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
8727         uint32_t ps_ctrl = 0;
8728         int id = -1;
8729         int i;
8730
8731         /* find scaler attached to this pipe */
8732         for (i = 0; i < crtc->num_scalers; i++) {
8733                 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
8734                 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
8735                         id = i;
8736                         pipe_config->pch_pfit.enabled = true;
8737                         pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
8738                         pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
8739                         break;
8740                 }
8741         }
8742
8743         scaler_state->scaler_id = id;
8744         if (id >= 0) {
8745                 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
8746         } else {
8747                 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
8748         }
8749 }
8750
/*
 * Read back the hardware state of the primary plane on SKL+ and build an
 * initial framebuffer description from it (format, modifier, size,
 * stride, surface base). Used for BIOS framebuffer takeover. On failure
 * plane_config->fb is left unset.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to take over if the plane is disabled. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(PLANE_CTL(pipe, plane_id));

	/* The pixel format field uses a different mask on gen11+. */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* On GLK and gen10+ the alpha mode lives in PLANE_COLOR_CTL. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/* Map the hardware tiling bits to a framebuffer modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/* Surface base address; the low 12 bits are masked off. */
	base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, plane_id));

	/* Width/height are stored 0-based in PLANE_SIZE. */
	val = I915_READ(PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	/* The stride register holds the pitch in stride_mult units. */
	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
	stride_mult = intel_fb_stride_alignment(fb, 0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
8853
8854 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
8855                                      struct intel_crtc_state *pipe_config)
8856 {
8857         struct drm_device *dev = crtc->base.dev;
8858         struct drm_i915_private *dev_priv = to_i915(dev);
8859         uint32_t tmp;
8860
8861         tmp = I915_READ(PF_CTL(crtc->pipe));
8862
8863         if (tmp & PF_ENABLE) {
8864                 pipe_config->pch_pfit.enabled = true;
8865                 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
8866                 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
8867
8868                 /* We currently do not free assignements of panel fitters on
8869                  * ivb/hsw (since we don't use the higher upscaling modes which
8870                  * differentiates them) so just WARN about this case for now. */
8871                 if (IS_GEN7(dev_priv)) {
8872                         WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
8873                                 PF_PIPE_SEL_IVB(crtc->pipe));
8874                 }
8875         }
8876 }
8877
8878 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
8879                                      struct intel_crtc_state *pipe_config)
8880 {
8881         struct drm_device *dev = crtc->base.dev;
8882         struct drm_i915_private *dev_priv = to_i915(dev);
8883         enum intel_display_power_domain power_domain;
8884         uint32_t tmp;
8885         bool ret;
8886
8887         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8888         if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
8889                 return false;
8890
8891         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8892         pipe_config->shared_dpll = NULL;
8893
8894         ret = false;
8895         tmp = I915_READ(PIPECONF(crtc->pipe));
8896         if (!(tmp & PIPECONF_ENABLE))
8897                 goto out;
8898
8899         switch (tmp & PIPECONF_BPC_MASK) {
8900         case PIPECONF_6BPC:
8901                 pipe_config->pipe_bpp = 18;
8902                 break;
8903         case PIPECONF_8BPC:
8904                 pipe_config->pipe_bpp = 24;
8905                 break;
8906         case PIPECONF_10BPC:
8907                 pipe_config->pipe_bpp = 30;
8908                 break;
8909         case PIPECONF_12BPC:
8910                 pipe_config->pipe_bpp = 36;
8911                 break;
8912         default:
8913                 break;
8914         }
8915
8916         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
8917                 pipe_config->limited_color_range = true;
8918
8919         if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
8920                 struct intel_shared_dpll *pll;
8921                 enum intel_dpll_id pll_id;
8922
8923                 pipe_config->has_pch_encoder = true;
8924
8925                 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
8926                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
8927                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
8928
8929                 ironlake_get_fdi_m_n_config(crtc, pipe_config);
8930
8931                 if (HAS_PCH_IBX(dev_priv)) {
8932                         /*
8933                          * The pipe->pch transcoder and pch transcoder->pll
8934                          * mapping is fixed.
8935                          */
8936                         pll_id = (enum intel_dpll_id) crtc->pipe;
8937                 } else {
8938                         tmp = I915_READ(PCH_DPLL_SEL);
8939                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
8940                                 pll_id = DPLL_ID_PCH_PLL_B;
8941                         else
8942                                 pll_id= DPLL_ID_PCH_PLL_A;
8943                 }
8944
8945                 pipe_config->shared_dpll =
8946                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
8947                 pll = pipe_config->shared_dpll;
8948
8949                 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
8950                                                 &pipe_config->dpll_hw_state));
8951
8952                 tmp = pipe_config->dpll_hw_state.dpll;
8953                 pipe_config->pixel_multiplier =
8954                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
8955                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
8956
8957                 ironlake_pch_clock_get(crtc, pipe_config);
8958         } else {
8959                 pipe_config->pixel_multiplier = 1;
8960         }
8961
8962         intel_get_pipe_timings(crtc, pipe_config);
8963         intel_get_pipe_src_size(crtc, pipe_config);
8964
8965         ironlake_get_pfit_config(crtc, pipe_config);
8966
8967         ret = true;
8968
8969 out:
8970         intel_display_power_put(dev_priv, power_domain);
8971
8972         return ret;
8973 }
8974
/*
 * Sanity-check that the display engine is sufficiently quiesced for the
 * LCPLL to be disabled: no active CRTCs, power well off, PLLs and
 * backlight PWMs disabled, panel power off, utility pin and GTC off,
 * and interrupts disabled. Emits I915_STATE_WARNs, doesn't fail.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
		     pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
	     "CPU PWM1 enabled\n");
	/* PWM2 only exists on Haswell. */
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
		     "CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
	     "PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
	     "Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
9009
9010 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9011 {
9012         if (IS_HASWELL(dev_priv))
9013                 return I915_READ(D_COMP_HSW);
9014         else
9015                 return I915_READ(D_COMP_BDW);
9016 }
9017
/*
 * Write the D_COMP register. On HSW the write goes through the pcode
 * mailbox (under pcu_lock); on BDW it is a plain MMIO write flushed
 * with a posting read.
 */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
	if (IS_HASWELL(dev_priv)) {
		mutex_lock(&dev_priv->pcu_lock);
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
					    val))
			DRM_DEBUG_KMS("Failed to write to D_COMP\n");
		mutex_unlock(&dev_priv->pcu_lock);
	} else {
		I915_WRITE(D_COMP_BDW, val);
		POSTING_READ(D_COMP_BDW);
	}
}
9031
9032 /*
9033  * This function implements pieces of two sequences from BSpec:
9034  * - Sequence for display software to disable LCPLL
9035  * - Sequence for display software to allow package C8+
9036  * The steps implemented here are just the steps that actually touch the LCPLL
9037  * register. Callers should take care of disabling all the display engine
9038  * functions, doing the mode unset, fixing interrupts, etc.
9039  */
9040 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
9041                               bool switch_to_fclk, bool allow_power_down)
9042 {
9043         uint32_t val;
9044
9045         assert_can_disable_lcpll(dev_priv);
9046
9047         val = I915_READ(LCPLL_CTL);
9048
9049         if (switch_to_fclk) {
9050                 val |= LCPLL_CD_SOURCE_FCLK;
9051                 I915_WRITE(LCPLL_CTL, val);
9052
9053                 if (wait_for_us(I915_READ(LCPLL_CTL) &
9054                                 LCPLL_CD_SOURCE_FCLK_DONE, 1))
9055                         DRM_ERROR("Switching to FCLK failed\n");
9056
9057                 val = I915_READ(LCPLL_CTL);
9058         }
9059
9060         val |= LCPLL_PLL_DISABLE;
9061         I915_WRITE(LCPLL_CTL, val);
9062         POSTING_READ(LCPLL_CTL);
9063
9064         if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
9065                 DRM_ERROR("LCPLL still locked\n");
9066
9067         val = hsw_read_dcomp(dev_priv);
9068         val |= D_COMP_COMP_DISABLE;
9069         hsw_write_dcomp(dev_priv, val);
9070         ndelay(100);
9071
9072         if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
9073                      1))
9074                 DRM_ERROR("D_COMP RCOMP still in progress\n");
9075
9076         if (allow_power_down) {
9077                 val = I915_READ(LCPLL_CTL);
9078                 val |= LCPLL_POWER_DOWN_ALLOW;
9079                 I915_WRITE(LCPLL_CTL, val);
9080                 POSTING_READ(LCPLL_CTL);
9081         }
9082 }
9083
9084 /*
9085  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
9086  * source.
9087  */
9088 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9089 {
9090         uint32_t val;
9091
9092         val = I915_READ(LCPLL_CTL);
9093
9094         if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
9095                     LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
9096                 return;
9097
9098         /*
9099          * Make sure we're not on PC8 state before disabling PC8, otherwise
9100          * we'll hang the machine. To prevent PC8 state, just enable force_wake.
9101          */
9102         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
9103
9104         if (val & LCPLL_POWER_DOWN_ALLOW) {
9105                 val &= ~LCPLL_POWER_DOWN_ALLOW;
9106                 I915_WRITE(LCPLL_CTL, val);
9107                 POSTING_READ(LCPLL_CTL);
9108         }
9109
9110         val = hsw_read_dcomp(dev_priv);
9111         val |= D_COMP_COMP_FORCE;
9112         val &= ~D_COMP_COMP_DISABLE;
9113         hsw_write_dcomp(dev_priv, val);
9114
9115         val = I915_READ(LCPLL_CTL);
9116         val &= ~LCPLL_PLL_DISABLE;
9117         I915_WRITE(LCPLL_CTL, val);
9118
9119         if (intel_wait_for_register(dev_priv,
9120                                     LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
9121                                     5))
9122                 DRM_ERROR("LCPLL not locked yet\n");
9123
9124         if (val & LCPLL_CD_SOURCE_FCLK) {
9125                 val = I915_READ(LCPLL_CTL);
9126                 val &= ~LCPLL_CD_SOURCE_FCLK;
9127                 I915_WRITE(LCPLL_CTL, val);
9128
9129                 if (wait_for_us((I915_READ(LCPLL_CTL) &
9130                                  LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9131                         DRM_ERROR("Switching back to LCPLL failed\n");
9132         }
9133
9134         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
9135
9136         intel_update_cdclk(dev_priv);
9137         intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
9138 }
9139
9140 /*
9141  * Package states C8 and deeper are really deep PC states that can only be
9142  * reached when all the devices on the system allow it, so even if the graphics
9143  * device allows PC8+, it doesn't mean the system will actually get to these
9144  * states. Our driver only allows PC8+ when going into runtime PM.
9145  *
9146  * The requirements for PC8+ are that all the outputs are disabled, the power
9147  * well is disabled and most interrupts are disabled, and these are also
9148  * requirements for runtime PM. When these conditions are met, we manually do
9149  * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
9150  * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
9151  * hang the machine.
9152  *
9153  * When we really reach PC8 or deeper states (not just when we allow it) we lose
9154  * the state of some registers, so when we come back from PC8+ we need to
9155  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9156  * need to take care of the registers kept by RC6. Notice that this happens even
9157  * if we don't put the device in PCI D3 state (which is what currently happens
9158  * because of the runtime PM support).
9159  *
9160  * For more, read "Display Sequences for Package C8" on the hardware
9161  * documentation.
9162  */
9163 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9164 {
9165         uint32_t val;
9166
9167         DRM_DEBUG_KMS("Enabling package C8+\n");
9168
9169         if (HAS_PCH_LPT_LP(dev_priv)) {
9170                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9171                 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9172                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9173         }
9174
9175         lpt_disable_clkout_dp(dev_priv);
9176         hsw_disable_lcpll(dev_priv, true, true);
9177 }
9178
/*
 * Undo hsw_enable_pc8(): restore the LCPLL, re-init the PCH reference
 * clock, and (on LPT-LP) disallow the PCH low-power partition level
 * again.
 */
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev_priv);

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
9194
/*
 * Reserve a shared DPLL for a HSW+ pipe. DSI outputs drive their own
 * clock and need no shared DPLL.
 *
 * Returns 0 on success, -EINVAL if no suitable DPLL could be found.
 */
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
				      struct intel_crtc_state *crtc_state)
{
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->base.state);

	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
		struct intel_encoder *encoder =
			intel_get_crtc_new_encoder(state, crtc_state);

		if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
					 pipe_name(crtc->pipe));
			return -EINVAL;
		}
	}

	return 0;
}
9214
/*
 * Read out which shared DPLL drives @port on CNL by decoding the per-port
 * clock-select field of DPCLKA_CFGCR0, and record it in @pipe_config.
 * Leaves pipe_config->shared_dpll untouched on an out-of-range readout.
 */
static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
				   enum port port,
				   struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	/* The mask isolates this port's field; the shift yields the PLL id. */
	temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
	id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);

	/* CNL only has DPLL0..DPLL2 selectable here. */
	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9230
/*
 * Read out which shared DPLL drives @port on ICL. Combo ports (A/B) are
 * decoded from DPCLKA_CFGCR0_ICL; TC ports (C..F) have a fixed MG PLL
 * mapping. Records the result in @pipe_config.
 */
static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	/* TODO: TBT pll not implemented. */
	switch (port) {
	case PORT_A:
	case PORT_B:
		temp = I915_READ(DPCLKA_CFGCR0_ICL) &
		       DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
		id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);

		/* Combo ports can only use the two combo-PHY DPLLs. */
		if (WARN_ON(id != DPLL_ID_ICL_DPLL0 && id != DPLL_ID_ICL_DPLL1))
			return;
		break;
	case PORT_C:
		id = DPLL_ID_ICL_MGPLL1;
		break;
	case PORT_D:
		id = DPLL_ID_ICL_MGPLL2;
		break;
	case PORT_E:
		id = DPLL_ID_ICL_MGPLL3;
		break;
	case PORT_F:
		id = DPLL_ID_ICL_MGPLL4;
		break;
	default:
		MISSING_CASE(port);
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9268
9269 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9270                                 enum port port,
9271                                 struct intel_crtc_state *pipe_config)
9272 {
9273         enum intel_dpll_id id;
9274
9275         switch (port) {
9276         case PORT_A:
9277                 id = DPLL_ID_SKL_DPLL0;
9278                 break;
9279         case PORT_B:
9280                 id = DPLL_ID_SKL_DPLL1;
9281                 break;
9282         case PORT_C:
9283                 id = DPLL_ID_SKL_DPLL2;
9284                 break;
9285         default:
9286                 DRM_ERROR("Incorrect port type\n");
9287                 return;
9288         }
9289
9290         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9291 }
9292
/*
 * Read out which shared DPLL drives @port on SKL/KBL/CFL by decoding the
 * per-port clock-select field of DPLL_CTRL2, and record it in
 * @pipe_config.
 */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	/*
	 * (port * 3 + 1) is the bit position of this port's clock-select
	 * field — presumably matching the shift baked into
	 * DPLL_CTRL2_DDI_CLK_SEL_MASK(); verify against i915_reg.h.
	 */
	id = temp >> (port * 3 + 1);

	/* SKL exposes DPLL0..DPLL3 here. */
	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9308
/*
 * Read out which PLL drives @port on HSW/BDW from PORT_CLK_SEL and
 * record it in @pipe_config. A port with no clock selected
 * (PORT_CLK_SEL_NONE) leaves pipe_config->shared_dpll untouched.
 */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		MISSING_CASE(ddi_pll_sel);
		/* fall through */
	case PORT_CLK_SEL_NONE:
		/* Unknown selections are warned about and treated as none. */
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9344
/*
 * Read out which CPU transcoder feeds @crtc and whether it is enabled.
 *
 * On success a power-domain reference for the transcoder is taken and
 * its bit is recorded in @power_domain_mask; the CALLER is responsible
 * for dropping every reference in that mask.
 *
 * Returns true if the transcoder's PIPECONF has the enable bit set,
 * false if the transcoder's power domain is off.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 tmp;

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * transcoder handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;
		/* Which pipe is the eDP transcoder currently routed to? */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		/* Override the fixed mapping if eDP feeds this pipe. */
		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	/* Reference taken above — caller must put it via the mask. */
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
9396
/*
 * Read out whether a BXT DSI transcoder (DSI A or DSI C) is driving
 * @crtc, updating pipe_config->cpu_transcoder if so.
 *
 * Power-domain references taken here are recorded in @power_domain_mask
 * and must be released by the caller.
 *
 * Returns true if a DSI transcoder was found for this pipe.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum port port;
	enum transcoder cpu_transcoder;
	u32 tmp;

	/* DSI is only possible on ports A and C on BXT. */
	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Skip ports routed to some other pipe. */
		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
9444
/*
 * Read out the DDI port state for @crtc: which port the transcoder is
 * attached to, which shared DPLL drives it (platform-specific decode),
 * and — on HSW/BDW — whether the PCH/FDI transcoder is in use.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	/* Dispatch to the platform-specific PLL readout. */
	if (IS_ICELAKE(dev_priv))
		icelake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		/* The readout should never find a disabled PLL in use. */
		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder A, and it is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
9490
/*
 * Read out the full hardware state of @crtc into @pipe_config
 * (HSW and newer).
 *
 * Every register block is only touched after its power domain was
 * successfully grabbed via intel_display_power_get_if_enabled(); the
 * collected references are dropped at "out:" before returning.
 *
 * Returns true if the pipe is active.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;

	intel_crtc_init_scalers(crtc, pipe_config);

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	/* Track every domain we grab so "out:" can release them all. */
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);

	/* On BXT/GLK the pipe may instead be fed by a DSI transcoder. */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
		WARN_ON(active);
		active = true;
	}

	if (!active)
		goto out;

	/* DSI transcoders have no DDI port and own their timing readout. */
	if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		haswell_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);

	pipe_config->gamma_mode =
		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;

	/* YCbCr output readout / sanity check (BDW+). */
	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
		u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
		bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV;

		if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
			bool blend_mode_420 = tmp &
					      PIPEMISC_YUV420_MODE_FULL_BLEND;

			/* All three 4:2:0 bits must agree with each other. */
			pipe_config->ycbcr420 = tmp & PIPEMISC_YUV420_ENABLE;
			if (pipe_config->ycbcr420 != clrspace_yuv ||
			    pipe_config->ycbcr420 != blend_mode_420)
				DRM_DEBUG_KMS("Bad 4:2:0 mode (%08x)\n", tmp);
		} else if (clrspace_yuv) {
			DRM_DEBUG_KMS("YCbCr 4:2:0 Unsupported\n");
		}
	}

	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		power_domain_mask |= BIT_ULL(power_domain);
		if (INTEL_GEN(dev_priv) >= 9)
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	/* PIPE_MULT only exists for non-eDP, non-DSI transcoders. */
	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	/* Release every power-domain reference taken during readout. */
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv, power_domain);

	return active;
}
9582
/*
 * Compute the CURBASE address for a cursor plane: the GGTT (or physical,
 * on platforms that need it) base plus the surface offset, adjusted so
 * that 180° rotation scans out correctly on GMCH platforms.
 */
static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 base;

	/* Some platforms require the cursor in physical memory. */
	if (INTEL_INFO(dev_priv)->cursor_needs_physical)
		base = obj->phys_handle->busaddr;
	else
		base = intel_plane_ggtt_offset(plane_state);

	base += plane_state->main.offset;

	/* ILK+ do this automagically */
	if (HAS_GMCH_DISPLAY(dev_priv) &&
	    plane_state->base.rotation & DRM_MODE_ROTATE_180)
		/* Point at the last pixel so reverse scanout works. */
		base += (plane_state->base.crtc_h *
			 plane_state->base.crtc_w - 1) * fb->format->cpp[0];

	return base;
}
9606
9607 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
9608 {
9609         int x = plane_state->base.crtc_x;
9610         int y = plane_state->base.crtc_y;
9611         u32 pos = 0;
9612
9613         if (x < 0) {
9614                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
9615                 x = -x;
9616         }
9617         pos |= x << CURSOR_X_SHIFT;
9618
9619         if (y < 0) {
9620                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
9621                 y = -y;
9622         }
9623         pos |= y << CURSOR_Y_SHIFT;
9624
9625         return pos;
9626 }
9627
9628 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
9629 {
9630         const struct drm_mode_config *config =
9631                 &plane_state->base.plane->dev->mode_config;
9632         int width = plane_state->base.crtc_w;
9633         int height = plane_state->base.crtc_h;
9634
9635         return width > 0 && width <= config->cursor_width &&
9636                 height > 0 && height <= config->cursor_height;
9637 }
9638
/*
 * Common atomic-check for cursor planes: clip the plane state (no
 * scaling allowed), require a linear framebuffer and no panning, and
 * store the computed surface offset in plane_state->main.offset.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_check_cursor(struct intel_crtc_state *crtc_state,
			      struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int src_x, src_y;
	u32 offset;
	int ret;

	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
						  &crtc_state->base,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* No fb means the cursor is being disabled — nothing to check. */
	if (!fb)
		return 0;

	if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	src_x = plane_state->base.src_x >> 16;
	src_y = plane_state->base.src_y >> 16;

	/* intel_compute_tile_offset() folds src_x/src_y into the offset. */
	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_compute_tile_offset(&src_x, &src_y, plane_state, 0);

	/* Any residual x/y would mean panning, which cursors can't do. */
	if (src_x != 0 || src_y != 0) {
		DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	plane_state->main.offset = offset;

	return 0;
}
9678
9679 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
9680                            const struct intel_plane_state *plane_state)
9681 {
9682         const struct drm_framebuffer *fb = plane_state->base.fb;
9683
9684         return CURSOR_ENABLE |
9685                 CURSOR_GAMMA_ENABLE |
9686                 CURSOR_FORMAT_ARGB |
9687                 CURSOR_STRIDE(fb->pitches[0]);
9688 }
9689
9690 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
9691 {
9692         int width = plane_state->base.crtc_w;
9693
9694         /*
9695          * 845g/865g are only limited by the width of their cursors,
9696          * the height is arbitrary up to the precision of the register.
9697          */
9698         return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
9699 }
9700
/*
 * Atomic-check for the 845g/865g cursor plane: run the common cursor
 * checks, then validate the platform-specific size and stride limits,
 * and precompute the CURCNTR value.
 *
 * Returns 0 on success or a negative error code.
 */
static int i845_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i845_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	/* Only these power-of-two strides are programmable via CURSOR_STRIDE. */
	switch (fb->pitches[0]) {
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
			      fb->pitches[0]);
		return -EINVAL;
	}

	/* Precompute so the update path can stay lock-friendly. */
	plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);

	return 0;
}
9739
/*
 * Program the 845g/865g cursor registers. Called with NULL states to
 * disable the cursor (all registers written as 0).
 *
 * The register write ordering is hardware-mandated — see the comment
 * in the body.
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;

		cntl = plane_state->ctl;
		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	/* _FW register accesses require the uncore lock. */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		/* Disable first, then reprogram, then re-enable. */
		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
		I915_WRITE_FW(CURBASE(PIPE_A), base);
		I915_WRITE_FW(CURSIZE, size);
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

		/* Cache so unchanged updates take the fast path below. */
		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
	}

	POSTING_READ_FW(CURCNTR(PIPE_A));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
9784
/* Disable the 845g/865g cursor by programming it with empty state. */
static void i845_disable_cursor(struct intel_plane *plane,
				struct intel_crtc *crtc)
{
	i845_update_cursor(plane, NULL, NULL);
}
9790
/*
 * Read out whether the 845g/865g cursor is enabled in hardware.
 * The cursor only exists on pipe A, so *pipe is always PIPE_A.
 *
 * Returns false (without touching *pipe) if the pipe's power domain
 * is off.
 */
static bool i845_cursor_get_hw_state(struct intel_plane *plane,
				     enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(PIPE_A);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;

	*pipe = PIPE_A;

	/* Balance the conditional get above. */
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
9810
9811 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
9812                            const struct intel_plane_state *plane_state)
9813 {
9814         struct drm_i915_private *dev_priv =
9815                 to_i915(plane_state->base.plane->dev);
9816         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9817         u32 cntl = 0;
9818
9819         if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
9820                 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
9821
9822         if (INTEL_GEN(dev_priv) <= 10) {
9823                 cntl |= MCURSOR_GAMMA_ENABLE;
9824
9825                 if (HAS_DDI(dev_priv))
9826                         cntl |= MCURSOR_PIPE_CSC_ENABLE;
9827         }
9828
9829         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
9830                 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
9831
9832         switch (plane_state->base.crtc_w) {
9833         case 64:
9834                 cntl |= MCURSOR_MODE_64_ARGB_AX;
9835                 break;
9836         case 128:
9837                 cntl |= MCURSOR_MODE_128_ARGB_AX;
9838                 break;
9839         case 256:
9840                 cntl |= MCURSOR_MODE_256_ARGB_AX;
9841                 break;
9842         default:
9843                 MISSING_CASE(plane_state->base.crtc_w);
9844                 return 0;
9845         }
9846
9847         if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
9848                 cntl |= MCURSOR_ROTATE_180;
9849
9850         return cntl;
9851 }
9852
9853 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
9854 {
9855         struct drm_i915_private *dev_priv =
9856                 to_i915(plane_state->base.plane->dev);
9857         int width = plane_state->base.crtc_w;
9858         int height = plane_state->base.crtc_h;
9859
9860         if (!intel_cursor_size_ok(plane_state))
9861                 return false;
9862
9863         /* Cursor width is limited to a few power-of-two sizes */
9864         switch (width) {
9865         case 256:
9866         case 128:
9867         case 64:
9868                 break;
9869         default:
9870                 return false;
9871         }
9872
9873         /*
9874          * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
9875          * height from 8 lines up to the cursor width, when the
9876          * cursor is not rotated. Everything else requires square
9877          * cursors.
9878          */
9879         if (HAS_CUR_FBC(dev_priv) &&
9880             plane_state->base.rotation & DRM_MODE_ROTATE_0) {
9881                 if (height < 8 || height > width)
9882                         return false;
9883         } else {
9884                 if (height != width)
9885                         return false;
9886         }
9887
9888         return true;
9889 }
9890
/*
 * Atomic-check for i9xx-style cursor planes: run the common cursor
 * checks, validate the platform size/stride rules and the CHV pipe C
 * erratum, then precompute the CURCNTR value.
 *
 * Returns 0 on success or a negative error code.
 */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	/* The stride must exactly match the visible cursor width. */
	if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
			      fb->pitches[0], plane_state->base.crtc_w);
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->base.visible && plane_state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	/* Precompute so the update path can stay lock-friendly. */
	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}
9942
/*
 * Program the i9xx-style cursor registers for @plane. Called with NULL
 * states to disable the cursor (all registers written as 0).
 *
 * The register write ordering below is hardware-mandated; see the big
 * comment in the body before changing it.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		cntl = plane_state->ctl;

		/* Non-square cursors need CUR_FBC_CTL to set the height. */
		if (plane_state->base.crtc_h != plane_state->base.crtc_w)
			fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	/* _FW register accesses require the uncore lock. */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
	 * Without the CURCNTR write the CURPOS write would
	 * arm itself. Thus we always start the full update
	 * with a CURCNTR write.
	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additonally
	 * a write to any of the cursor register will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * CURCNTR and CUR_FBC_CTL are always
	 * armed by the CURBASE write only.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != fbc_ctl ||
	    plane->cursor.cntl != cntl) {
		I915_WRITE_FW(CURCNTR(pipe), cntl);
		if (HAS_CUR_FBC(dev_priv))
			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);

		/* Cache so unchanged updates take the fast path below. */
		plane->cursor.base = base;
		plane->cursor.size = fbc_ctl;
		plane->cursor.cntl = cntl;
	} else {
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);
	}

	POSTING_READ_FW(CURBASE(pipe));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
10003
/* Turn the cursor off by programming it with a NULL plane state. */
static void i9xx_disable_cursor(struct intel_plane *plane,
				struct intel_crtc *crtc)
{
	i9xx_update_cursor(plane, NULL, NULL);
}
10009
10010 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
10011                                      enum pipe *pipe)
10012 {
10013         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10014         enum intel_display_power_domain power_domain;
10015         bool ret;
10016         u32 val;
10017
10018         /*
10019          * Not 100% correct for planes that can move between pipes,
10020          * but that's only the case for gen2-3 which don't have any
10021          * display power wells.
10022          */
10023         power_domain = POWER_DOMAIN_PIPE(plane->pipe);
10024         if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
10025                 return false;
10026
10027         val = I915_READ(CURCNTR(plane->pipe));
10028
10029         ret = val & MCURSOR_MODE;
10030
10031         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
10032                 *pipe = plane->pipe;
10033         else
10034                 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
10035                         MCURSOR_PIPE_SELECT_SHIFT;
10036
10037         intel_display_power_put(dev_priv, power_domain);
10038
10039         return ret;
10040 }
10041
/*
 * VESA 640x480x72Hz mode to set on the pipe when doing load detection
 * and no explicit mode was requested by the caller.
 */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
10047
10048 struct drm_framebuffer *
10049 intel_framebuffer_create(struct drm_i915_gem_object *obj,
10050                          struct drm_mode_fb_cmd2 *mode_cmd)
10051 {
10052         struct intel_framebuffer *intel_fb;
10053         int ret;
10054
10055         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10056         if (!intel_fb)
10057                 return ERR_PTR(-ENOMEM);
10058
10059         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
10060         if (ret)
10061                 goto err;
10062
10063         return &intel_fb->base;
10064
10065 err:
10066         kfree(intel_fb);
10067         return ERR_PTR(ret);
10068 }
10069
10070 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
10071                                         struct drm_crtc *crtc)
10072 {
10073         struct drm_plane *plane;
10074         struct drm_plane_state *plane_state;
10075         int ret, i;
10076
10077         ret = drm_atomic_add_affected_planes(state, crtc);
10078         if (ret)
10079                 return ret;
10080
10081         for_each_new_plane_in_state(state, plane, plane_state, i) {
10082                 if (plane_state->crtc != crtc)
10083                         continue;
10084
10085                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
10086                 if (ret)
10087                         return ret;
10088
10089                 drm_atomic_set_fb_for_plane(plane_state, NULL);
10090         }
10091
10092         return 0;
10093 }
10094
/*
 * intel_get_load_detect_pipe - light up a pipe on @connector for load detection
 * @connector: connector to probe
 * @mode: mode to program, or NULL to use the default 640x480 load_detect_mode
 * @old: receives the saved state needed by intel_release_load_detect_pipe()
 * @ctx: modeset acquire context used for lock acquisition and backoff
 *
 * Returns a non-zero value (true) on success, 0 (false) on failure, or
 * -EDEADLK when the caller must back off and retry the locking sequence.
 * NOTE(review): the function returns int but uses true/false literals; the
 * bool constants convert to 1/0, so callers checking "ret > 0" or truthiness
 * behave identically — confirm all callers also propagate -EDEADLK.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       const struct drm_display_mode *mode,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		/* Skip crtcs this encoder cannot drive. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Already in use; drop the lock and keep looking. */
		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	/*
	 * Two atomic states: @state carries the load-detect configuration we
	 * commit now, @restore_state snapshots the current configuration so
	 * intel_release_load_detect_pipe() can put everything back.
	 */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
	if (ret)
		goto fail;

	/* Load detection wants a bare pipe: turn all planes off. */
	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/* Duplicate the pre-load-detect state into restore_state. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	/* Ownership of restore_state passes to the caller via @old. */
	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* -EDEADLK must reach the caller so it can back off and retry. */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
10252
10253 void intel_release_load_detect_pipe(struct drm_connector *connector,
10254                                     struct intel_load_detect_pipe *old,
10255                                     struct drm_modeset_acquire_ctx *ctx)
10256 {
10257         struct intel_encoder *intel_encoder =
10258                 intel_attached_encoder(connector);
10259         struct drm_encoder *encoder = &intel_encoder->base;
10260         struct drm_atomic_state *state = old->restore_state;
10261         int ret;
10262
10263         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10264                       connector->base.id, connector->name,
10265                       encoder->base.id, encoder->name);
10266
10267         if (!state)
10268                 return;
10269
10270         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
10271         if (ret)
10272                 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
10273         drm_atomic_state_put(state);
10274 }
10275
10276 static int i9xx_pll_refclk(struct drm_device *dev,
10277                            const struct intel_crtc_state *pipe_config)
10278 {
10279         struct drm_i915_private *dev_priv = to_i915(dev);
10280         u32 dpll = pipe_config->dpll_hw_state.dpll;
10281
10282         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10283                 return dev_priv->vbt.lvds_ssc_freq;
10284         else if (HAS_PCH_SPLIT(dev_priv))
10285                 return 120000;
10286         else if (!IS_GEN2(dev_priv))
10287                 return 96000;
10288         else
10289                 return 48000;
10290 }
10291
/*
 * Returns the clock of the currently programmed mode of the given pipe,
 * by decoding the DPLL/FP register state saved in @pipe_config into
 * m/n/p divisors and running them back through the DPLL formula.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* FPA1 selects the alternate set of feedback divisors. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Pineview packs N differently (one-hot encoded, hence the ffs). */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev_priv)) {
		/* P1 is stored one-hot encoded on gen3+. */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			/* Can't decode the clock; leave port_clock untouched. */
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2: LVDS state lives in the LVDS register (except i830). */
		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
10381
/*
 * Derive the pixel (dot) clock in kHz from a link frequency and the
 * programmed M/N ratio. Returns 0 if link_n is zero (link not configured).
 */
int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the link clock is simpler:
	 * link_clock = (m * link_clock) / n
	 */

	/* Guard against division by zero when the link is not set up. */
	if (!m_n->link_n)
		return 0;

	/* 64-bit intermediate keeps m * link_freq from overflowing. */
	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
}
10400
/*
 * Read out the PCH (ironlake) pipe clock: port_clock comes from the
 * DPLL registers, and crtc_clock is reconstructed from the FDI M/N
 * configuration so it is valid even without any active ports.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}
10418
10419 /* Returns the currently programmed mode of the given encoder. */
10420 struct drm_display_mode *
10421 intel_encoder_current_mode(struct intel_encoder *encoder)
10422 {
10423         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
10424         struct intel_crtc_state *crtc_state;
10425         struct drm_display_mode *mode;
10426         struct intel_crtc *crtc;
10427         enum pipe pipe;
10428
10429         if (!encoder->get_hw_state(encoder, &pipe))
10430                 return NULL;
10431
10432         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
10433
10434         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10435         if (!mode)
10436                 return NULL;
10437
10438         crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
10439         if (!crtc_state) {
10440                 kfree(mode);
10441                 return NULL;
10442         }
10443
10444         crtc_state->base.crtc = &crtc->base;
10445
10446         if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
10447                 kfree(crtc_state);
10448                 kfree(mode);
10449                 return NULL;
10450         }
10451
10452         encoder->get_config(encoder, crtc_state);
10453
10454         intel_mode_from_pipe_config(mode, crtc_state);
10455
10456         kfree(crtc_state);
10457
10458         return mode;
10459 }
10460
/* drm_crtc_funcs.destroy: tear down the DRM crtc and free its wrapper. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(intel_crtc);
}
10468
10469 /**
10470  * intel_wm_need_update - Check whether watermarks need updating
10471  * @plane: drm plane
10472  * @state: new plane state
10473  *
10474  * Check current plane state versus the new one to determine whether
10475  * watermarks need to be recalculated.
10476  *
10477  * Returns true or false.
10478  */
10479 static bool intel_wm_need_update(struct drm_plane *plane,
10480                                  struct drm_plane_state *state)
10481 {
10482         struct intel_plane_state *new = to_intel_plane_state(state);
10483         struct intel_plane_state *cur = to_intel_plane_state(plane->state);
10484
10485         /* Update watermarks on tiling or size changes. */
10486         if (new->base.visible != cur->base.visible)
10487                 return true;
10488
10489         if (!cur->base.fb || !new->base.fb)
10490                 return false;
10491
10492         if (cur->base.fb->modifier != new->base.fb->modifier ||
10493             cur->base.rotation != new->base.rotation ||
10494             drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
10495             drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
10496             drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
10497             drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
10498                 return true;
10499
10500         return false;
10501 }
10502
10503 static bool needs_scaling(const struct intel_plane_state *state)
10504 {
10505         int src_w = drm_rect_width(&state->base.src) >> 16;
10506         int src_h = drm_rect_height(&state->base.src) >> 16;
10507         int dst_w = drm_rect_width(&state->base.dst);
10508         int dst_h = drm_rect_height(&state->base.dst);
10509
10510         return (src_w != dst_w || src_h != dst_h);
10511 }
10512
/*
 * intel_plane_atomic_calc_changes - compute per-plane derived state flags
 * @old_crtc_state: crtc state before this update
 * @crtc_state: new crtc state (flags are accumulated into it)
 * @old_plane_state: plane state before this update
 * @plane_state: new plane state being checked
 *
 * Compares the old and new plane state and sets the derived crtc-state
 * flags (update_wm_pre/post, disable_cxsr, fb_changed, fb_bits,
 * disable_lp_wm) that the commit code later acts on. Also runs plane
 * scaler checks on gen9+. Returns 0 on success or a negative error code.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct drm_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct drm_plane_state *plane_state)
{
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
	struct drm_crtc *crtc = crtc_state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->plane);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->base.active;
	bool is_crtc_enabled = crtc_state->active;
	bool turn_off, turn_on, visible, was_visible;
	struct drm_framebuffer *fb = plane_state->fb;
	int ret;

	/* Gen9+ non-cursor planes may need a pipe scaler allocated/freed. */
	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(
			to_intel_crtc_state(crtc_state),
			to_intel_plane_state(plane_state));
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->base.visible;
	visible = plane_state->visible;

	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		plane_state->visible = visible = false;
		to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
	}

	/* Invisible before and after: nothing to flag. */
	if (!was_visible && !visible)
		return 0;

	if (fb != old_plane_state->base.fb)
		pipe_config->fb_changed = true;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
			 intel_crtc->base.base.id, intel_crtc->base.name,
			 plane->base.base.id, plane->base.name,
			 fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.base.id, plane->base.name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		/* Pre-g4x platforms recompute watermarks before enabling. */
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			pipe_config->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			pipe_config->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (intel_wm_need_update(&plane->base, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			pipe_config->update_wm_pre = true;
			pipe_config->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		pipe_config->fb_bits |= plane->frontbuffer_bit;

	/*
	 * WaCxSRDisabledForSpriteScaling:ivb
	 *
	 * cstate->update_wm was already set above, so this flag will
	 * take effect when we commit and program watermarks.
	 */
	if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) &&
	    needs_scaling(to_intel_plane_state(plane_state)) &&
	    !needs_scaling(old_plane_state))
		pipe_config->disable_lp_wm = true;

	return 0;
}
10617
10618 static bool encoders_cloneable(const struct intel_encoder *a,
10619                                const struct intel_encoder *b)
10620 {
10621         /* masks could be asymmetric, so check both ways */
10622         return a == b || (a->cloneable & (1 << b->type) &&
10623                           b->cloneable & (1 << a->type));
10624 }
10625
/*
 * Check that @encoder can be cloned with every other encoder already
 * assigned to @crtc in @state. Returns true if all pairings are valid.
 */
static bool check_single_encoder_cloning(struct drm_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		/* Only connectors routed to this crtc matter. */
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}
10647
/*
 * drm_crtc_helper_funcs.atomic_check: validate the new crtc state and
 * compute derived configuration — clocks, color management, pipe and
 * intermediate watermarks, scalers, and IPS. Returns 0 on success or a
 * negative error code.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	struct drm_atomic_state *state = crtc_state->state;
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	/* Pipe being turned off: watermarks must be reprogrammed after. */
	if (mode_changed && !crtc_state->active)
		pipe_config->update_wm_post = true;

	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	if (crtc_state->color_mgmt_changed) {
		ret = intel_color_check(crtc, crtc_state);
		if (ret)
			return ret;

		/*
		 * Changing color management on Intel hardware is
		 * handled as part of planes update.
		 */
		crtc_state->planes_changed = true;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm &&
	    !to_intel_atomic_state(state)->skip_intermediate_wm) {
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(dev,
								intel_crtc,
								pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	} else if (dev_priv->display.compute_intermediate_wm) {
		/* skip_intermediate_wm set: reuse the optimal watermarks. */
		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
			pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = skl_check_pipe_max_pixel_rate(intel_crtc,
							    pipe_config);
		if (!ret)
			ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
							 pipe_config);
	}

	if (HAS_IPS(dev_priv))
		pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);

	return ret;
}
10732
/* CRTC atomic helper vtable hooked up for all intel crtcs. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.atomic_begin = intel_begin_crtc_commit,
	.atomic_flush = intel_finish_crtc_commit,
	.atomic_check = intel_crtc_atomic_check,
};
10738
/*
 * Sync each connector's atomic state (best_encoder, crtc) with the
 * legacy encoder/crtc pointers read out from hardware, fixing up the
 * connector reference counts held by bound connector states.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Drop the reference held for the previously bound crtc. */
		if (connector->base.state->crtc)
			drm_connector_put(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* Bound again: take a fresh reference. */
			drm_connector_get(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
10763
10764 static void
10765 connected_sink_compute_bpp(struct intel_connector *connector,
10766                            struct intel_crtc_state *pipe_config)
10767 {
10768         const struct drm_display_info *info = &connector->base.display_info;
10769         int bpp = pipe_config->pipe_bpp;
10770
10771         DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
10772                       connector->base.base.id,
10773                       connector->base.name);
10774
10775         /* Don't use an invalid EDID bpc value */
10776         if (info->bpc != 0 && info->bpc * 3 < bpp) {
10777                 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
10778                               bpp, info->bpc * 3);
10779                 pipe_config->pipe_bpp = info->bpc * 3;
10780         }
10781
10782         /* Clamp bpp to 8 on screens without EDID 1.4 */
10783         if (info->bpc == 0 && bpp > 24) {
10784                 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
10785                               bpp);
10786                 pipe_config->pipe_bpp = 24;
10787         }
10788 }
10789
10790 static int
10791 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
10792                           struct intel_crtc_state *pipe_config)
10793 {
10794         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10795         struct drm_atomic_state *state;
10796         struct drm_connector *connector;
10797         struct drm_connector_state *connector_state;
10798         int bpp, i;
10799
10800         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
10801             IS_CHERRYVIEW(dev_priv)))
10802                 bpp = 10*3;
10803         else if (INTEL_GEN(dev_priv) >= 5)
10804                 bpp = 12*3;
10805         else
10806                 bpp = 8*3;
10807
10808
10809         pipe_config->pipe_bpp = bpp;
10810
10811         state = pipe_config->base.state;
10812
10813         /* Clamp display bpp to EDID value */
10814         for_each_new_connector_in_state(state, connector, connector_state, i) {
10815                 if (connector_state->crtc != &crtc->base)
10816                         continue;
10817
10818                 connected_sink_compute_bpp(to_intel_connector(connector),
10819                                            pipe_config);
10820         }
10821
10822         return bpp;
10823 }
10824
/* Dump the hardware crtc_* timing fields of @mode to the KMS debug log. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
        DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
                        "type: 0x%x flags: 0x%x\n",
                mode->crtc_clock,
                mode->crtc_hdisplay, mode->crtc_hsync_start,
                mode->crtc_hsync_end, mode->crtc_htotal,
                mode->crtc_vdisplay, mode->crtc_vsync_start,
                mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
10835
10836 static inline void
10837 intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
10838                       unsigned int lane_count, struct intel_link_m_n *m_n)
10839 {
10840         DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
10841                       id, lane_count,
10842                       m_n->gmch_m, m_n->gmch_n,
10843                       m_n->link_m, m_n->link_n, m_n->tu);
10844 }
10845
/* Expands to a designated initializer keyed by the INTEL_OUTPUT_* value. */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/* Human-readable names indexed by enum intel_output_type. */
static const char * const output_type_str[] = {
        OUTPUT_TYPE(UNUSED),
        OUTPUT_TYPE(ANALOG),
        OUTPUT_TYPE(DVO),
        OUTPUT_TYPE(SDVO),
        OUTPUT_TYPE(LVDS),
        OUTPUT_TYPE(TVOUT),
        OUTPUT_TYPE(HDMI),
        OUTPUT_TYPE(DP),
        OUTPUT_TYPE(EDP),
        OUTPUT_TYPE(DSI),
        OUTPUT_TYPE(DDI),
        OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
10864
10865 static void snprintf_output_types(char *buf, size_t len,
10866                                   unsigned int output_types)
10867 {
10868         char *str = buf;
10869         int i;
10870
10871         str[0] = '\0';
10872
10873         for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
10874                 int r;
10875
10876                 if ((output_types & BIT(i)) == 0)
10877                         continue;
10878
10879                 r = snprintf(str, len, "%s%s",
10880                              str != buf ? "," : "", output_type_str[i]);
10881                 if (r >= len)
10882                         break;
10883                 str += r;
10884                 len -= r;
10885
10886                 output_types &= ~BIT(i);
10887         }
10888
10889         WARN_ON_ONCE(output_types != 0);
10890 }
10891
/*
 * Dump the full contents of @pipe_config, and the state of every plane
 * currently attached to @crtc, to the KMS debug log. @context is a short
 * caller-supplied tag appended to the CRTC header line.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config,
                                   const char *context)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_plane *plane;
        struct intel_plane *intel_plane;
        struct intel_plane_state *state;
        struct drm_framebuffer *fb;
        char buf[64];

        DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
                      crtc->base.base.id, crtc->base.name, context);

        snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
        DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
                      buf, pipe_config->output_types);

        DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
                      transcoder_name(pipe_config->cpu_transcoder),
                      pipe_config->pipe_bpp, pipe_config->dither);

        /* FDI M/N only applies when the pipe is routed through the PCH. */
        if (pipe_config->has_pch_encoder)
                intel_dump_m_n_config(pipe_config, "fdi",
                                      pipe_config->fdi_lanes,
                                      &pipe_config->fdi_m_n);

        if (pipe_config->ycbcr420)
                DRM_DEBUG_KMS("YCbCr 4:2:0 output enabled\n");

        if (intel_crtc_has_dp_encoder(pipe_config)) {
                intel_dump_m_n_config(pipe_config, "dp m_n",
                                pipe_config->lane_count, &pipe_config->dp_m_n);
                /* m2_n2 is the second M/N set used when DRRS is enabled. */
                if (pipe_config->has_drrs)
                        intel_dump_m_n_config(pipe_config, "dp m2_n2",
                                              pipe_config->lane_count,
                                              &pipe_config->dp_m2_n2);
        }

        DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
                      pipe_config->has_audio, pipe_config->has_infoframe);

        DRM_DEBUG_KMS("requested mode:\n");
        drm_mode_debug_printmodeline(&pipe_config->base.mode);
        DRM_DEBUG_KMS("adjusted mode:\n");
        drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
        intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
        DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
                      pipe_config->port_clock,
                      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
                      pipe_config->pixel_rate);

        /* Pipe scaler state only exists on gen9+. */
        if (INTEL_GEN(dev_priv) >= 9)
                DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
                              crtc->num_scalers,
                              pipe_config->scaler_state.scaler_users,
                              pipe_config->scaler_state.scaler_id);

        if (HAS_GMCH_DISPLAY(dev_priv))
                DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
                              pipe_config->gmch_pfit.control,
                              pipe_config->gmch_pfit.pgm_ratios,
                              pipe_config->gmch_pfit.lvds_border_bits);
        else
                DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
                              pipe_config->pch_pfit.pos,
                              pipe_config->pch_pfit.size,
                              enableddisabled(pipe_config->pch_pfit.enabled));

        DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
                      pipe_config->ips_enabled, pipe_config->double_wide);

        intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

        /* Walk all planes, reporting only those attached to this pipe. */
        DRM_DEBUG_KMS("planes on this crtc\n");
        list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
                struct drm_format_name_buf format_name;
                intel_plane = to_intel_plane(plane);
                if (intel_plane->pipe != crtc->pipe)
                        continue;

                state = to_intel_plane_state(plane->state);
                fb = state->base.fb;
                if (!fb) {
                        DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
                                      plane->base.id, plane->name, state->scaler_id);
                        continue;
                }

                DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
                              plane->base.id, plane->name,
                              fb->base.id, fb->width, fb->height,
                              drm_get_format_name(fb->format->format, &format_name));
                /* Plane src coordinates are 16.16 fixed point, hence the shifts. */
                if (INTEL_GEN(dev_priv) >= 9)
                        DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
                                      state->scaler_id,
                                      state->base.src.x1 >> 16,
                                      state->base.src.y1 >> 16,
                                      drm_rect_width(&state->base.src) >> 16,
                                      drm_rect_height(&state->base.src) >> 16,
                                      state->base.dst.x1, state->base.dst.y1,
                                      drm_rect_width(&state->base.dst),
                                      drm_rect_height(&state->base.dst));
        }
}
10998
10999 static bool check_digital_port_conflicts(struct drm_atomic_state *state)
11000 {
11001         struct drm_device *dev = state->dev;
11002         struct drm_connector *connector;
11003         struct drm_connector_list_iter conn_iter;
11004         unsigned int used_ports = 0;
11005         unsigned int used_mst_ports = 0;
11006         bool ret = true;
11007
11008         /*
11009          * Walk the connector list instead of the encoder
11010          * list to detect the problem on ddi platforms
11011          * where there's just one encoder per digital port.
11012          */
11013         drm_connector_list_iter_begin(dev, &conn_iter);
11014         drm_for_each_connector_iter(connector, &conn_iter) {
11015                 struct drm_connector_state *connector_state;
11016                 struct intel_encoder *encoder;
11017
11018                 connector_state = drm_atomic_get_new_connector_state(state, connector);
11019                 if (!connector_state)
11020                         connector_state = connector->state;
11021
11022                 if (!connector_state->best_encoder)
11023                         continue;
11024
11025                 encoder = to_intel_encoder(connector_state->best_encoder);
11026
11027                 WARN_ON(!connector_state->crtc);
11028
11029                 switch (encoder->type) {
11030                         unsigned int port_mask;
11031                 case INTEL_OUTPUT_DDI:
11032                         if (WARN_ON(!HAS_DDI(to_i915(dev))))
11033                                 break;
11034                         /* else: fall through */
11035                 case INTEL_OUTPUT_DP:
11036                 case INTEL_OUTPUT_HDMI:
11037                 case INTEL_OUTPUT_EDP:
11038                         port_mask = 1 << encoder->port;
11039
11040                         /* the same port mustn't appear more than once */
11041                         if (used_ports & port_mask)
11042                                 ret = false;
11043
11044                         used_ports |= port_mask;
11045                         break;
11046                 case INTEL_OUTPUT_DP_MST:
11047                         used_mst_ports |=
11048                                 1 << encoder->port;
11049                         break;
11050                 default:
11051                         break;
11052                 }
11053         }
11054         drm_connector_list_iter_end(&conn_iter);
11055
11056         /* can't mix MST and SST/HDMI on the same port */
11057         if (used_ports & used_mst_ports)
11058                 return false;
11059
11060         return ret;
11061 }
11062
/*
 * Zero the i915-specific extension of @crtc_state while leaving the drm
 * core state (base) untouched, preserving the handful of fields that
 * must survive across a config-compute cycle.
 */
static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(crtc_state->base.crtc->dev);
        struct intel_crtc_scaler_state scaler_state;
        struct intel_dpll_hw_state dpll_hw_state;
        struct intel_shared_dpll *shared_dpll;
        struct intel_crtc_wm_state wm_state;
        bool force_thru, ips_force_disable;

        /* FIXME: before the switch to atomic started, a new pipe_config was
         * kzalloc'd. Code that depends on any field being zero should be
         * fixed, so that the crtc_state can be safely duplicated. For now,
         * only fields that are know to not cause problems are preserved. */

        /* Save the fields that the wipe below must not lose. */
        scaler_state = crtc_state->scaler_state;
        shared_dpll = crtc_state->shared_dpll;
        dpll_hw_state = crtc_state->dpll_hw_state;
        force_thru = crtc_state->pch_pfit.force_thru;
        ips_force_disable = crtc_state->ips_force_disable;
        /* These platforms keep precomputed watermarks in crtc_state->wm. */
        if (IS_G4X(dev_priv) ||
            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                wm_state = crtc_state->wm;

        /* Keep base drm_crtc_state intact, only clear our extended struct */
        BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
        memset(&crtc_state->base + 1, 0,
               sizeof(*crtc_state) - sizeof(crtc_state->base));

        /* Restore the preserved fields into the now-zeroed state. */
        crtc_state->scaler_state = scaler_state;
        crtc_state->shared_dpll = shared_dpll;
        crtc_state->dpll_hw_state = dpll_hw_state;
        crtc_state->pch_pfit.force_thru = force_thru;
        crtc_state->ips_force_disable = ips_force_disable;
        if (IS_G4X(dev_priv) ||
            IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                crtc_state->wm = wm_state;
}
11102
/*
 * Compute the full pipe configuration for @crtc from the atomic @state:
 * reset the state, sanitize mode flags, derive the baseline bpp, then run
 * the encoder and crtc .compute_config() hooks, retrying once if the crtc
 * asks for a bandwidth-constrained retry. Returns 0 on success or a
 * negative error code.
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
                          struct intel_crtc_state *pipe_config)
{
        struct drm_atomic_state *state = pipe_config->base.state;
        struct intel_encoder *encoder;
        struct drm_connector *connector;
        struct drm_connector_state *connector_state;
        int base_bpp, ret = -EINVAL;
        int i;
        bool retry = true;

        clear_intel_crtc_state(pipe_config);

        /* Default 1:1 pipe-to-transcoder mapping; hooks may override. */
        pipe_config->cpu_transcoder =
                (enum transcoder) to_intel_crtc(crtc)->pipe;

        /*
         * Sanitize sync polarity flags based on requested ones. If neither
         * positive or negative polarity is requested, treat this as meaning
         * negative polarity.
         */
        if (!(pipe_config->base.adjusted_mode.flags &
              (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
                pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

        if (!(pipe_config->base.adjusted_mode.flags &
              (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
                pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

        base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
                                             pipe_config);
        if (base_bpp < 0)
                goto fail;

        /*
         * Determine the real pipe dimensions. Note that stereo modes can
         * increase the actual pipe size due to the frame doubling and
         * insertion of additional space for blanks between the frame. This
         * is stored in the crtc timings. We use the requested mode to do this
         * computation to clearly distinguish it from the adjusted mode, which
         * can be changed by the connectors in the below retry loop.
         */
        drm_mode_get_hv_timing(&pipe_config->base.mode,
                               &pipe_config->pipe_src_w,
                               &pipe_config->pipe_src_h);

        for_each_new_connector_in_state(state, connector, connector_state, i) {
                if (connector_state->crtc != crtc)
                        continue;

                encoder = to_intel_encoder(connector_state->best_encoder);

                if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
                        DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
                        goto fail;
                }

                /*
                 * Determine output_types before calling the .compute_config()
                 * hooks so that the hooks can use this information safely.
                 */
                if (encoder->compute_output_type)
                        pipe_config->output_types |=
                                BIT(encoder->compute_output_type(encoder, pipe_config,
                                                                 connector_state));
                else
                        pipe_config->output_types |= BIT(encoder->type);
        }

encoder_retry:
        /* Ensure the port clock defaults are reset when retrying. */
        pipe_config->port_clock = 0;
        pipe_config->pixel_multiplier = 1;

        /* Fill in default crtc timings, allow encoders to overwrite them. */
        drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
                              CRTC_STEREO_DOUBLE);

        /* Pass our mode to the connectors and the CRTC to give them a chance to
         * adjust it according to limitations or connector properties, and also
         * a chance to reject the mode entirely.
         */
        for_each_new_connector_in_state(state, connector, connector_state, i) {
                if (connector_state->crtc != crtc)
                        continue;

                encoder = to_intel_encoder(connector_state->best_encoder);

                if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
                        DRM_DEBUG_KMS("Encoder config failure\n");
                        goto fail;
                }
        }

        /* Set default port clock if not overwritten by the encoder. Needs to be
         * done afterwards in case the encoder adjusts the mode. */
        if (!pipe_config->port_clock)
                pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
                        * pipe_config->pixel_multiplier;

        ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
        if (ret < 0) {
                DRM_DEBUG_KMS("CRTC fixup failed\n");
                goto fail;
        }

        /* A single retry is allowed when the crtc signals RETRY. */
        if (ret == RETRY) {
                if (WARN(!retry, "loop in pipe configuration computation\n")) {
                        ret = -EINVAL;
                        goto fail;
                }

                DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
                retry = false;
                goto encoder_retry;
        }

        /* Dithering seems to not pass-through bits correctly when it should, so
         * only enable it on 6bpc panels and when its not a compliance
         * test requesting 6bpc video pattern.
         */
        pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
                !pipe_config->dither_force_disable;
        DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
                      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

fail:
        return ret;
}
11233
/*
 * Compare two clocks and treat them as matching when they lie within
 * roughly 5% of each other. A zero clock only matches another zero.
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
        int delta;

        if (clock1 == clock2)
                return true;

        if (!clock1 || !clock2)
                return false;

        delta = abs(clock1 - clock2);

        /* Integer form of: delta / avg(clock1, clock2) < 5% */
        return ((delta + clock1 + clock2) * 100) / (clock1 + clock2) < 105;
}
11251
11252 static bool
11253 intel_compare_m_n(unsigned int m, unsigned int n,
11254                   unsigned int m2, unsigned int n2,
11255                   bool exact)
11256 {
11257         if (m == m2 && n == n2)
11258                 return true;
11259
11260         if (exact || !m || !n || !m2 || !n2)
11261                 return false;
11262
11263         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
11264
11265         if (n > n2) {
11266                 while (n > n2) {
11267                         m2 <<= 1;
11268                         n2 <<= 1;
11269                 }
11270         } else if (n < n2) {
11271                 while (n < n2) {
11272                         m <<= 1;
11273                         n <<= 1;
11274                 }
11275         }
11276
11277         if (n != n2)
11278                 return false;
11279
11280         return intel_fuzzy_clock_check(m, m2);
11281 }
11282
11283 static bool
11284 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
11285                        struct intel_link_m_n *m2_n2,
11286                        bool adjust)
11287 {
11288         if (m_n->tu == m2_n2->tu &&
11289             intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
11290                               m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
11291             intel_compare_m_n(m_n->link_m, m_n->link_n,
11292                               m2_n2->link_m, m2_n2->link_n, !adjust)) {
11293                 if (adjust)
11294                         *m2_n2 = *m_n;
11295
11296                 return true;
11297         }
11298
11299         return false;
11300 }
11301
/*
 * Report a pipe config mismatch in field @name with a printf-style
 * explanation. When @adjust is set (fastset computation) mismatches are
 * expected, so only a KMS debug message is emitted; otherwise they are
 * real state-verification errors.
 */
static void __printf(3, 4)
pipe_config_err(bool adjust, const char *name, const char *format, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, format);
        vaf.fmt = format;
        vaf.va = &args;

        if (adjust)
                drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
        else
                drm_err("mismatch in %s %pV", name, &vaf);

        va_end(args);
}
11319
11320 static bool
11321 intel_pipe_config_compare(struct drm_i915_private *dev_priv,
11322                           struct intel_crtc_state *current_config,
11323                           struct intel_crtc_state *pipe_config,
11324                           bool adjust)
11325 {
11326         bool ret = true;
11327         bool fixup_inherited = adjust &&
11328                 (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
11329                 !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
11330
11331 #define PIPE_CONF_CHECK_X(name) do { \
11332         if (current_config->name != pipe_config->name) { \
11333                 pipe_config_err(adjust, __stringify(name), \
11334                           "(expected 0x%08x, found 0x%08x)\n", \
11335                           current_config->name, \
11336                           pipe_config->name); \
11337                 ret = false; \
11338         } \
11339 } while (0)
11340
11341 #define PIPE_CONF_CHECK_I(name) do { \
11342         if (current_config->name != pipe_config->name) { \
11343                 pipe_config_err(adjust, __stringify(name), \
11344                           "(expected %i, found %i)\n", \
11345                           current_config->name, \
11346                           pipe_config->name); \
11347                 ret = false; \
11348         } \
11349 } while (0)
11350
11351 #define PIPE_CONF_CHECK_BOOL(name) do { \
11352         if (current_config->name != pipe_config->name) { \
11353                 pipe_config_err(adjust, __stringify(name), \
11354                           "(expected %s, found %s)\n", \
11355                           yesno(current_config->name), \
11356                           yesno(pipe_config->name)); \
11357                 ret = false; \
11358         } \
11359 } while (0)
11360
11361 /*
11362  * Checks state where we only read out the enabling, but not the entire
11363  * state itself (like full infoframes or ELD for audio). These states
11364  * require a full modeset on bootup to fix up.
11365  */
11366 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
11367         if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
11368                 PIPE_CONF_CHECK_BOOL(name); \
11369         } else { \
11370                 pipe_config_err(adjust, __stringify(name), \
11371                           "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
11372                           yesno(current_config->name), \
11373                           yesno(pipe_config->name)); \
11374                 ret = false; \
11375         } \
11376 } while (0)
11377
11378 #define PIPE_CONF_CHECK_P(name) do { \
11379         if (current_config->name != pipe_config->name) { \
11380                 pipe_config_err(adjust, __stringify(name), \
11381                           "(expected %p, found %p)\n", \
11382                           current_config->name, \
11383                           pipe_config->name); \
11384                 ret = false; \
11385         } \
11386 } while (0)
11387
11388 #define PIPE_CONF_CHECK_M_N(name) do { \
11389         if (!intel_compare_link_m_n(&current_config->name, \
11390                                     &pipe_config->name,\
11391                                     adjust)) { \
11392                 pipe_config_err(adjust, __stringify(name), \
11393                           "(expected tu %i gmch %i/%i link %i/%i, " \
11394                           "found tu %i, gmch %i/%i link %i/%i)\n", \
11395                           current_config->name.tu, \
11396                           current_config->name.gmch_m, \
11397                           current_config->name.gmch_n, \
11398                           current_config->name.link_m, \
11399                           current_config->name.link_n, \
11400                           pipe_config->name.tu, \
11401                           pipe_config->name.gmch_m, \
11402                           pipe_config->name.gmch_n, \
11403                           pipe_config->name.link_m, \
11404                           pipe_config->name.link_n); \
11405                 ret = false; \
11406         } \
11407 } while (0)
11408
11409 /* This is required for BDW+ where there is only one set of registers for
11410  * switching between high and low RR.
11411  * This macro can be used whenever a comparison has to be made between one
11412  * hw state and multiple sw state variables.
11413  */
11414 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
11415         if (!intel_compare_link_m_n(&current_config->name, \
11416                                     &pipe_config->name, adjust) && \
11417             !intel_compare_link_m_n(&current_config->alt_name, \
11418                                     &pipe_config->name, adjust)) { \
11419                 pipe_config_err(adjust, __stringify(name), \
11420                           "(expected tu %i gmch %i/%i link %i/%i, " \
11421                           "or tu %i gmch %i/%i link %i/%i, " \
11422                           "found tu %i, gmch %i/%i link %i/%i)\n", \
11423                           current_config->name.tu, \
11424                           current_config->name.gmch_m, \
11425                           current_config->name.gmch_n, \
11426                           current_config->name.link_m, \
11427                           current_config->name.link_n, \
11428                           current_config->alt_name.tu, \
11429                           current_config->alt_name.gmch_m, \
11430                           current_config->alt_name.gmch_n, \
11431                           current_config->alt_name.link_m, \
11432                           current_config->alt_name.link_n, \
11433                           pipe_config->name.tu, \
11434                           pipe_config->name.gmch_m, \
11435                           pipe_config->name.gmch_n, \
11436                           pipe_config->name.link_m, \
11437                           pipe_config->name.link_n); \
11438                 ret = false; \
11439         } \
11440 } while (0)
11441
11442 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
11443         if ((current_config->name ^ pipe_config->name) & (mask)) { \
11444                 pipe_config_err(adjust, __stringify(name), \
11445                           "(%x) (expected %i, found %i)\n", \
11446                           (mask), \
11447                           current_config->name & (mask), \
11448                           pipe_config->name & (mask)); \
11449                 ret = false; \
11450         } \
11451 } while (0)
11452
11453 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
11454         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
11455                 pipe_config_err(adjust, __stringify(name), \
11456                           "(expected %i, found %i)\n", \
11457                           current_config->name, \
11458                           pipe_config->name); \
11459                 ret = false; \
11460         } \
11461 } while (0)
11462
11463 #define PIPE_CONF_QUIRK(quirk)  \
11464         ((current_config->quirks | pipe_config->quirks) & (quirk))
11465
11466         PIPE_CONF_CHECK_I(cpu_transcoder);
11467
11468         PIPE_CONF_CHECK_BOOL(has_pch_encoder);
11469         PIPE_CONF_CHECK_I(fdi_lanes);
11470         PIPE_CONF_CHECK_M_N(fdi_m_n);
11471
11472         PIPE_CONF_CHECK_I(lane_count);
11473         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
11474
11475         if (INTEL_GEN(dev_priv) < 8) {
11476                 PIPE_CONF_CHECK_M_N(dp_m_n);
11477
11478                 if (current_config->has_drrs)
11479                         PIPE_CONF_CHECK_M_N(dp_m2_n2);
11480         } else
11481                 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
11482
11483         PIPE_CONF_CHECK_X(output_types);
11484
11485         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
11486         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
11487         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
11488         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
11489         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
11490         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
11491
11492         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
11493         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
11494         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
11495         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
11496         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
11497         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
11498
11499         PIPE_CONF_CHECK_I(pixel_multiplier);
11500         PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
11501         if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
11502             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
11503                 PIPE_CONF_CHECK_BOOL(limited_color_range);
11504
11505         PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
11506         PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
11507         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
11508         PIPE_CONF_CHECK_BOOL(ycbcr420);
11509
11510         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
11511
11512         PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11513                               DRM_MODE_FLAG_INTERLACE);
11514
11515         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
11516                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11517                                       DRM_MODE_FLAG_PHSYNC);
11518                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11519                                       DRM_MODE_FLAG_NHSYNC);
11520                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11521                                       DRM_MODE_FLAG_PVSYNC);
11522                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11523                                       DRM_MODE_FLAG_NVSYNC);
11524         }
11525
11526         PIPE_CONF_CHECK_X(gmch_pfit.control);
11527         /* pfit ratios are autocomputed by the hw on gen4+ */
11528         if (INTEL_GEN(dev_priv) < 4)
11529                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
11530         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
11531
11532         if (!adjust) {
11533                 PIPE_CONF_CHECK_I(pipe_src_w);
11534                 PIPE_CONF_CHECK_I(pipe_src_h);
11535
11536                 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
11537                 if (current_config->pch_pfit.enabled) {
11538                         PIPE_CONF_CHECK_X(pch_pfit.pos);
11539                         PIPE_CONF_CHECK_X(pch_pfit.size);
11540                 }
11541
11542                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
11543                 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
11544         }
11545
11546         PIPE_CONF_CHECK_BOOL(double_wide);
11547
11548         PIPE_CONF_CHECK_P(shared_dpll);
11549         PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
11550         PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
11551         PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
11552         PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
11553         PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
11554         PIPE_CONF_CHECK_X(dpll_hw_state.spll);
11555         PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
11556         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
11557         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
11558         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
11559         PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
11560         PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
11561         PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
11562         PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
11563         PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
11564         PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
11565         PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
11566         PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
11567         PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
11568         PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
11569         PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
11570         PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
11571         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
11572         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
11573         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
11574         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
11575         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
11576         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
11577         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
11578         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
11579         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
11580
11581         PIPE_CONF_CHECK_X(dsi_pll.ctrl);
11582         PIPE_CONF_CHECK_X(dsi_pll.div);
11583
11584         if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
11585                 PIPE_CONF_CHECK_I(pipe_bpp);
11586
11587         PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
11588         PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
11589
11590         PIPE_CONF_CHECK_I(min_voltage_level);
11591
11592 #undef PIPE_CONF_CHECK_X
11593 #undef PIPE_CONF_CHECK_I
11594 #undef PIPE_CONF_CHECK_BOOL
11595 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
11596 #undef PIPE_CONF_CHECK_P
11597 #undef PIPE_CONF_CHECK_FLAGS
11598 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
11599 #undef PIPE_CONF_QUIRK
11600
11601         return ret;
11602 }
11603
11604 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
11605                                            const struct intel_crtc_state *pipe_config)
11606 {
11607         if (pipe_config->has_pch_encoder) {
11608                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
11609                                                             &pipe_config->fdi_m_n);
11610                 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
11611
11612                 /*
11613                  * FDI already provided one idea for the dotclock.
11614                  * Yell if the encoder disagrees.
11615                  */
11616                 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
11617                      "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
11618                      fdi_dotclock, dotclock);
11619         }
11620 }
11621
/*
 * Compare the watermark and DDB allocation state read back from the
 * hardware against the software state computed for this commit.
 * Only meaningful on gen9+ (SKL-style watermarks); inactive crtcs and
 * older platforms are skipped. Mismatches are reported via DRM_ERROR.
 */
static void verify_wm_state(struct drm_crtc *crtc,
                            struct drm_crtc_state *new_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct skl_ddb_allocation hw_ddb, *sw_ddb;
        struct skl_pipe_wm hw_wm, *sw_wm;
        struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
        struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        const enum pipe pipe = intel_crtc->pipe;
        int plane, level, max_level = ilk_wm_max_level(dev_priv);

        if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
                return;

        /* Read back the watermark state actually programmed in hardware. */
        skl_pipe_wm_get_hw_state(crtc, &hw_wm);
        sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;

        /* Same for the DDB allocation; sw side lives in dev_priv. */
        skl_ddb_get_hw_state(dev_priv, &hw_ddb);
        sw_ddb = &dev_priv->wm.skl_hw.ddb;

        /* Gen11+ additionally tracks the number of enabled DBUF slices. */
        if (INTEL_GEN(dev_priv) >= 11)
                if (hw_ddb.enabled_slices != sw_ddb->enabled_slices)
                        DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
                                  sw_ddb->enabled_slices,
                                  hw_ddb.enabled_slices);
        /* planes */
        for_each_universal_plane(dev_priv, pipe, plane) {
                hw_plane_wm = &hw_wm.planes[plane];
                sw_plane_wm = &sw_wm->planes[plane];

                /* Watermarks: compare every level up to the platform max. */
                for (level = 0; level <= max_level; level++) {
                        if (skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                &sw_plane_wm->wm[level]))
                                continue;

                        DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe), plane + 1, level,
                                  sw_plane_wm->wm[level].plane_en,
                                  sw_plane_wm->wm[level].plane_res_b,
                                  sw_plane_wm->wm[level].plane_res_l,
                                  hw_plane_wm->wm[level].plane_en,
                                  hw_plane_wm->wm[level].plane_res_b,
                                  hw_plane_wm->wm[level].plane_res_l);
                }

                /* Transition watermark is checked separately from the levels. */
                if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
                                         &sw_plane_wm->trans_wm)) {
                        DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe), plane + 1,
                                  sw_plane_wm->trans_wm.plane_en,
                                  sw_plane_wm->trans_wm.plane_res_b,
                                  sw_plane_wm->trans_wm.plane_res_l,
                                  hw_plane_wm->trans_wm.plane_en,
                                  hw_plane_wm->trans_wm.plane_res_b,
                                  hw_plane_wm->trans_wm.plane_res_l);
                }

                /* DDB */
                hw_ddb_entry = &hw_ddb.plane[pipe][plane];
                sw_ddb_entry = &sw_ddb->plane[pipe][plane];

                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
                                  pipe_name(pipe), plane + 1,
                                  sw_ddb_entry->start, sw_ddb_entry->end,
                                  hw_ddb_entry->start, hw_ddb_entry->end);
                }
        }

        /*
         * cursor
         * If the cursor plane isn't active, we may not have updated its ddb
         * allocation. In that case since the ddb allocation will be updated
         * once the plane becomes visible, we can skip this check.
         *
         * NOTE(review): the condition below is literally "if (1)", so the
         * cursor is in fact always checked despite the comment above —
         * confirm whether the skip described there is still intended.
         */
        if (1) {
                hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
                sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

                /* Watermarks: same per-level comparison as ordinary planes. */
                for (level = 0; level <= max_level; level++) {
                        if (skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                &sw_plane_wm->wm[level]))
                                continue;

                        DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe), level,
                                  sw_plane_wm->wm[level].plane_en,
                                  sw_plane_wm->wm[level].plane_res_b,
                                  sw_plane_wm->wm[level].plane_res_l,
                                  hw_plane_wm->wm[level].plane_en,
                                  hw_plane_wm->wm[level].plane_res_b,
                                  hw_plane_wm->wm[level].plane_res_l);
                }

                if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
                                         &sw_plane_wm->trans_wm)) {
                        DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                  pipe_name(pipe),
                                  sw_plane_wm->trans_wm.plane_en,
                                  sw_plane_wm->trans_wm.plane_res_b,
                                  sw_plane_wm->trans_wm.plane_res_l,
                                  hw_plane_wm->trans_wm.plane_en,
                                  hw_plane_wm->trans_wm.plane_res_b,
                                  hw_plane_wm->trans_wm.plane_res_l);
                }

                /* DDB */
                hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
                sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];

                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
                                  pipe_name(pipe),
                                  sw_ddb_entry->start, sw_ddb_entry->end,
                                  hw_ddb_entry->start, hw_ddb_entry->end);
                }
        }
}
11743
11744 static void
11745 verify_connector_state(struct drm_device *dev,
11746                        struct drm_atomic_state *state,
11747                        struct drm_crtc *crtc)
11748 {
11749         struct drm_connector *connector;
11750         struct drm_connector_state *new_conn_state;
11751         int i;
11752
11753         for_each_new_connector_in_state(state, connector, new_conn_state, i) {
11754                 struct drm_encoder *encoder = connector->encoder;
11755                 struct drm_crtc_state *crtc_state = NULL;
11756
11757                 if (new_conn_state->crtc != crtc)
11758                         continue;
11759
11760                 if (crtc)
11761                         crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
11762
11763                 intel_connector_verify_state(crtc_state, new_conn_state);
11764
11765                 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
11766                      "connector's atomic encoder doesn't match legacy encoder\n");
11767         }
11768 }
11769
/*
 * Verify each encoder's software state against the connectors in this
 * atomic state and against the encoder's own hardware state.
 */
static void
verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
{
        struct intel_encoder *encoder;
        struct drm_connector *connector;
        struct drm_connector_state *old_conn_state, *new_conn_state;
        int i;

        for_each_intel_encoder(dev, encoder) {
                /*
                 * found: some connector in this commit referenced the
                 *        encoder in either its old or new state.
                 * enabled: a connector uses the encoder in its new state.
                 */
                bool enabled = false, found = false;
                enum pipe pipe;

                DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
                              encoder->base.base.id,
                              encoder->base.name);

                for_each_oldnew_connector_in_state(state, connector, old_conn_state,
                                                   new_conn_state, i) {
                        if (old_conn_state->best_encoder == &encoder->base)
                                found = true;

                        if (new_conn_state->best_encoder != &encoder->base)
                                continue;
                        found = enabled = true;

                        /* Connector and its encoder must agree on the crtc. */
                        I915_STATE_WARN(new_conn_state->crtc !=
                                        encoder->base.crtc,
                             "connector's crtc doesn't match encoder crtc\n");
                }

                /* Encoders untouched by this commit are not checked. */
                if (!found)
                        continue;

                I915_STATE_WARN(!!encoder->base.crtc != enabled,
                     "encoder's enabled state mismatch "
                     "(expected %i, found %i)\n",
                     !!encoder->base.crtc, enabled);

                /* A detached encoder must also be off in hardware. */
                if (!encoder->base.crtc) {
                        bool active;

                        active = encoder->get_hw_state(encoder, &pipe);
                        I915_STATE_WARN(active,
                             "encoder detached but still enabled on pipe %c.\n",
                             pipe_name(pipe));
                }
        }
}
11818
/*
 * Read back the full pipe configuration from hardware and compare it
 * against the software state computed for this commit. Any mismatch is
 * reported via I915_STATE_WARN and both configs are dumped.
 */
static void
verify_crtc_state(struct drm_crtc *crtc,
                  struct drm_crtc_state *old_crtc_state,
                  struct drm_crtc_state *new_crtc_state)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *pipe_config, *sw_config;
        struct drm_atomic_state *old_state;
        bool active;

        /*
         * The old crtc state is no longer needed, so destroy it and reuse
         * its storage as zeroed scratch space for the hardware readback.
         * Only the crtc and state backpointers are re-established.
         */
        old_state = old_crtc_state->state;
        __drm_atomic_helper_crtc_destroy_state(old_crtc_state);
        pipe_config = to_intel_crtc_state(old_crtc_state);
        memset(pipe_config, 0, sizeof(*pipe_config));
        pipe_config->base.crtc = crtc;
        pipe_config->base.state = old_state;

        DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

        active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);

        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv))
                active = new_crtc_state->active;

        I915_STATE_WARN(new_crtc_state->active != active,
             "crtc active state doesn't match with hw state "
             "(expected %i, found %i)\n", new_crtc_state->active, active);

        I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
             "transitional active state does not match atomic hw state "
             "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);

        /* Each encoder on the crtc must agree on active state and pipe. */
        for_each_encoder_on_crtc(dev, crtc, encoder) {
                enum pipe pipe;

                active = encoder->get_hw_state(encoder, &pipe);
                I915_STATE_WARN(active != new_crtc_state->active,
                        "[ENCODER:%i] active %i with crtc active %i\n",
                        encoder->base.base.id, active, new_crtc_state->active);

                I915_STATE_WARN(active && intel_crtc->pipe != pipe,
                                "Encoder connected to wrong pipe %c\n",
                                pipe_name(pipe));

                /* Let active encoders fill in their part of the hw config. */
                if (active)
                        encoder->get_config(encoder, pipe_config);
        }

        intel_crtc_compute_pixel_rate(pipe_config);

        /* Config comparison is only meaningful for an active crtc. */
        if (!new_crtc_state->active)
                return;

        intel_pipe_config_sanity_check(dev_priv, pipe_config);

        sw_config = to_intel_crtc_state(new_crtc_state);
        if (!intel_pipe_config_compare(dev_priv, sw_config,
                                       pipe_config, false)) {
                I915_STATE_WARN(1, "pipe state doesn't match!\n");
                intel_dump_pipe_config(intel_crtc, pipe_config,
                                       "[hw state]");
                intel_dump_pipe_config(intel_crtc, sw_config,
                                       "[sw state]");
        }
}
11888
11889 static void
11890 intel_verify_planes(struct intel_atomic_state *state)
11891 {
11892         struct intel_plane *plane;
11893         const struct intel_plane_state *plane_state;
11894         int i;
11895
11896         for_each_new_intel_plane_in_state(state, plane,
11897                                           plane_state, i)
11898                 assert_plane(plane, plane_state->base.visible);
11899 }
11900
/*
 * Verify one shared DPLL's software tracking against its hardware state.
 * With @crtc NULL only the PLL's internal bookkeeping is checked;
 * otherwise the crtc's membership in the PLL's masks is verified too.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
                         struct intel_shared_dpll *pll,
                         struct drm_crtc *crtc,
                         struct drm_crtc_state *new_state)
{
        struct intel_dpll_hw_state dpll_hw_state;
        unsigned int crtc_mask;
        bool active;

        memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

        DRM_DEBUG_KMS("%s\n", pll->info->name);

        active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

        /* Always-on PLLs have no meaningful on/off tracking to verify. */
        if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
                I915_STATE_WARN(!pll->on && pll->active_mask,
                     "pll in active use but not on in sw tracking\n");
                I915_STATE_WARN(pll->on && !pll->active_mask,
                     "pll is on but not used by any active crtc\n");
                I915_STATE_WARN(pll->on != active,
                     "pll on state mismatch (expected %i, found %i)\n",
                     pll->on, active);
        }

        if (!crtc) {
                /* active_mask must be a subset of the reference mask. */
                I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
                                "more active pll users than references: %x vs %x\n",
                                pll->active_mask, pll->state.crtc_mask);

                return;
        }

        crtc_mask = drm_crtc_mask(crtc);

        /*
         * NOTE(review): the warnings below print
         * pipe_name(drm_crtc_index(crtc)), i.e. they assume crtc index ==
         * pipe enum value — confirm that holds on all platforms.
         */
        if (new_state->active)
                I915_STATE_WARN(!(pll->active_mask & crtc_mask),
                                "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
                                pipe_name(drm_crtc_index(crtc)), pll->active_mask);
        else
                I915_STATE_WARN(pll->active_mask & crtc_mask,
                                "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
                                pipe_name(drm_crtc_index(crtc)), pll->active_mask);

        I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
                        "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
                        crtc_mask, pll->state.crtc_mask);

        /* Finally the programmed register values must match sw tracking. */
        I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
                                          &dpll_hw_state,
                                          sizeof(dpll_hw_state)),
                        "pll hw state mismatch\n");
}
11955
11956 static void
11957 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
11958                          struct drm_crtc_state *old_crtc_state,
11959                          struct drm_crtc_state *new_crtc_state)
11960 {
11961         struct drm_i915_private *dev_priv = to_i915(dev);
11962         struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
11963         struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
11964
11965         if (new_state->shared_dpll)
11966                 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
11967
11968         if (old_state->shared_dpll &&
11969             old_state->shared_dpll != new_state->shared_dpll) {
11970                 unsigned int crtc_mask = drm_crtc_mask(crtc);
11971                 struct intel_shared_dpll *pll = old_state->shared_dpll;
11972
11973                 I915_STATE_WARN(pll->active_mask & crtc_mask,
11974                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
11975                                 pipe_name(drm_crtc_index(crtc)));
11976                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
11977                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
11978                                 pipe_name(drm_crtc_index(crtc)));
11979         }
11980 }
11981
11982 static void
11983 intel_modeset_verify_crtc(struct drm_crtc *crtc,
11984                           struct drm_atomic_state *state,
11985                           struct drm_crtc_state *old_state,
11986                           struct drm_crtc_state *new_state)
11987 {
11988         if (!needs_modeset(new_state) &&
11989             !to_intel_crtc_state(new_state)->update_pipe)
11990                 return;
11991
11992         verify_wm_state(crtc, new_state);
11993         verify_connector_state(crtc->dev, state, crtc);
11994         verify_crtc_state(crtc, old_state, new_state);
11995         verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
11996 }
11997
11998 static void
11999 verify_disabled_dpll_state(struct drm_device *dev)
12000 {
12001         struct drm_i915_private *dev_priv = to_i915(dev);
12002         int i;
12003
12004         for (i = 0; i < dev_priv->num_shared_dpll; i++)
12005                 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
12006 }
12007
/*
 * Verify state that is not tied to any particular enabled crtc:
 * encoders, connectors with no crtc, and all shared DPLLs.
 */
static void
intel_modeset_verify_disabled(struct drm_device *dev,
                              struct drm_atomic_state *state)
{
        verify_encoder_state(dev, state);
        verify_connector_state(dev, state, NULL);
        verify_disabled_dpll_state(dev);
}
12016
12017 static void update_scanline_offset(struct intel_crtc *crtc)
12018 {
12019         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12020
12021         /*
12022          * The scanline counter increments at the leading edge of hsync.
12023          *
12024          * On most platforms it starts counting from vtotal-1 on the
12025          * first active line. That means the scanline counter value is
12026          * always one less than what we would expect. Ie. just after
12027          * start of vblank, which also occurs at start of hsync (on the
12028          * last active line), the scanline counter will read vblank_start-1.
12029          *
12030          * On gen2 the scanline counter starts counting from 1 instead
12031          * of vtotal-1, so we have to subtract one (or rather add vtotal-1
12032          * to keep the value positive), instead of adding one.
12033          *
12034          * On HSW+ the behaviour of the scanline counter depends on the output
12035          * type. For DP ports it behaves like most other platforms, but on HDMI
12036          * there's an extra 1 line difference. So we need to add two instead of
12037          * one to the value.
12038          *
12039          * On VLV/CHV DSI the scanline counter would appear to increment
12040          * approx. 1/3 of a scanline before start of vblank. Unfortunately
12041          * that means we can't tell whether we're in vblank or not while
12042          * we're on that particular line. We must still set scanline_offset
12043          * to 1 so that the vblank timestamps come out correct when we query
12044          * the scanline counter from within the vblank interrupt handler.
12045          * However if queried just before the start of vblank we'll get an
12046          * answer that's slightly in the future.
12047          */
12048         if (IS_GEN2(dev_priv)) {
12049                 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
12050                 int vtotal;
12051
12052                 vtotal = adjusted_mode->crtc_vtotal;
12053                 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
12054                         vtotal /= 2;
12055
12056                 crtc->scanline_offset = vtotal - 1;
12057         } else if (HAS_DDI(dev_priv) &&
12058                    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
12059                 crtc->scanline_offset = 2;
12060         } else
12061                 crtc->scanline_offset = 1;
12062 }
12063
12064 static void intel_modeset_clear_plls(struct drm_atomic_state *state)
12065 {
12066         struct drm_device *dev = state->dev;
12067         struct drm_i915_private *dev_priv = to_i915(dev);
12068         struct drm_crtc *crtc;
12069         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12070         int i;
12071
12072         if (!dev_priv->display.crtc_compute_clock)
12073                 return;
12074
12075         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12076                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12077                 struct intel_shared_dpll *old_dpll =
12078                         to_intel_crtc_state(old_crtc_state)->shared_dpll;
12079
12080                 if (!needs_modeset(new_crtc_state))
12081                         continue;
12082
12083                 to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;
12084
12085                 if (!old_dpll)
12086                         continue;
12087
12088                 intel_release_shared_dpll(old_dpll, intel_crtc, state);
12089         }
12090 }
12091
12092 /*
12093  * This implements the workaround described in the "notes" section of the mode
12094  * set sequence documentation. When going from no pipes or single pipe to
12095  * multiple pipes, and planes are enabled after the pipe, we need to wait at
12096  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
12097  */
12098 static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
12099 {
12100         struct drm_crtc_state *crtc_state;
12101         struct intel_crtc *intel_crtc;
12102         struct drm_crtc *crtc;
12103         struct intel_crtc_state *first_crtc_state = NULL;
12104         struct intel_crtc_state *other_crtc_state = NULL;
12105         enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
12106         int i;
12107
12108         /* look at all crtc's that are going to be enabled in during modeset */
12109         for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
12110                 intel_crtc = to_intel_crtc(crtc);
12111
12112                 if (!crtc_state->active || !needs_modeset(crtc_state))
12113                         continue;
12114
12115                 if (first_crtc_state) {
12116                         other_crtc_state = to_intel_crtc_state(crtc_state);
12117                         break;
12118                 } else {
12119                         first_crtc_state = to_intel_crtc_state(crtc_state);
12120                         first_pipe = intel_crtc->pipe;
12121                 }
12122         }
12123
12124         /* No workaround needed? */
12125         if (!first_crtc_state)
12126                 return 0;
12127
12128         /* w/a possibly needed, check how many crtc's are already enabled. */
12129         for_each_intel_crtc(state->dev, intel_crtc) {
12130                 struct intel_crtc_state *pipe_config;
12131
12132                 pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
12133                 if (IS_ERR(pipe_config))
12134                         return PTR_ERR(pipe_config);
12135
12136                 pipe_config->hsw_workaround_pipe = INVALID_PIPE;
12137
12138                 if (!pipe_config->base.active ||
12139                     needs_modeset(&pipe_config->base))
12140                         continue;
12141
12142                 /* 2 or more enabled crtcs means no need for w/a */
12143                 if (enabled_pipe != INVALID_PIPE)
12144                         return 0;
12145
12146                 enabled_pipe = intel_crtc->pipe;
12147         }
12148
12149         if (enabled_pipe != INVALID_PIPE)
12150                 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
12151         else if (other_crtc_state)
12152                 other_crtc_state->hsw_workaround_pipe = first_pipe;
12153
12154         return 0;
12155 }
12156
12157 static int intel_lock_all_pipes(struct drm_atomic_state *state)
12158 {
12159         struct drm_crtc *crtc;
12160
12161         /* Add all pipes to the state */
12162         for_each_crtc(state->dev, crtc) {
12163                 struct drm_crtc_state *crtc_state;
12164
12165                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12166                 if (IS_ERR(crtc_state))
12167                         return PTR_ERR(crtc_state);
12168         }
12169
12170         return 0;
12171 }
12172
12173 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
12174 {
12175         struct drm_crtc *crtc;
12176
12177         /*
12178          * Add all pipes to the state, and force
12179          * a modeset on all the active ones.
12180          */
12181         for_each_crtc(state->dev, crtc) {
12182                 struct drm_crtc_state *crtc_state;
12183                 int ret;
12184
12185                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12186                 if (IS_ERR(crtc_state))
12187                         return PTR_ERR(crtc_state);
12188
12189                 if (!crtc_state->active || needs_modeset(crtc_state))
12190                         continue;
12191
12192                 crtc_state->mode_changed = true;
12193
12194                 ret = drm_atomic_add_affected_connectors(state, crtc);
12195                 if (ret)
12196                         return ret;
12197
12198                 ret = drm_atomic_add_affected_planes(state, crtc);
12199                 if (ret)
12200                         return ret;
12201         }
12202
12203         return 0;
12204 }
12205
/*
 * Global checks run once per atomic commit when at least one CRTC does a
 * full modeset: digital port conflicts, the active-crtc bitmask, cdclk
 * recomputation, PLL cleanup and the HSW plane workaround.
 */
static int intel_modeset_checks(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int ret = 0, i;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	/* Start from the currently committed device-wide state. */
	intel_state->modeset = true;
	intel_state->active_crtcs = dev_priv->active_crtcs;
	intel_state->cdclk.logical = dev_priv->cdclk.logical;
	intel_state->cdclk.actual = dev_priv->cdclk.actual;

	/* Refresh the active-crtc bitmask and note pipes toggling on/off. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->active)
			intel_state->active_crtcs |= 1 << i;
		else
			intel_state->active_crtcs &= ~(1 << i);

		if (old_crtc_state->active != new_crtc_state->active)
			intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off.  We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc.  For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		ret = dev_priv->display.modeset_calc_cdclk(state);
		if (ret < 0)
			return ret;

		/*
		 * Writes to dev_priv->cdclk.logical must be protected by
		 * holding all the crtc locks, even if we don't end up
		 * touching the hardware
		 */
		if (intel_cdclk_changed(&dev_priv->cdclk.logical,
					&intel_state->cdclk.logical)) {
			ret = intel_lock_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		/* All pipes must be switched off while we change the cdclk. */
		if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
					      &intel_state->cdclk.actual)) {
			ret = intel_modeset_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
			      intel_state->cdclk.logical.cdclk,
			      intel_state->cdclk.actual.cdclk);
		DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
			      intel_state->cdclk.logical.voltage_level,
			      intel_state->cdclk.actual.voltage_level);
	} else {
		/* No cdclk hook: just carry the current logical cdclk over. */
		to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
	}

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
12283
12284 /*
12285  * Handle calculation of various watermark data at the end of the atomic check
12286  * phase.  The code here should be run after the per-crtc and per-plane 'check'
12287  * handlers to ensure that all derived state has been updated.
12288  */
12289 static int calc_watermark_data(struct drm_atomic_state *state)
12290 {
12291         struct drm_device *dev = state->dev;
12292         struct drm_i915_private *dev_priv = to_i915(dev);
12293
12294         /* Is there platform-specific watermark information to calculate? */
12295         if (dev_priv->display.compute_global_watermarks)
12296                 return dev_priv->display.compute_global_watermarks(state);
12297
12298         return 0;
12299 }
12300
/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 *
 * Computes a full pipe config for every CRTC that needs a modeset,
 * demotes a modeset to a fastset when fastboot finds the old and new
 * configs equivalent, then runs the global modeset checks, plane checks,
 * FBC CRTC selection and watermark calculation.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int intel_atomic_check(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *crtc_state;
	int ret, i;
	bool any_ms = false;

	/* Catch I915_MODE_FLAG_INHERITED */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      crtc_state, i) {
		/* Any private-flags change forces a full modeset. */
		if (crtc_state->mode.private_flags !=
		    old_crtc_state->mode.private_flags)
			crtc_state->mode_changed = true;
	}

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc_state);

		if (!needs_modeset(crtc_state))
			continue;

		/* A crtc being disabled still counts as a modeset. */
		if (!crtc_state->enable) {
			any_ms = true;
			continue;
		}

		ret = intel_modeset_pipe_config(crtc, pipe_config);
		if (ret) {
			intel_dump_pipe_config(to_intel_crtc(crtc),
					       pipe_config, "[failed]");
			return ret;
		}

		/*
		 * With fastboot, demote the modeset to a fastset when the
		 * computed config compares equal to the current one.
		 */
		if (i915_modparams.fastboot &&
		    intel_pipe_config_compare(dev_priv,
					to_intel_crtc_state(old_crtc_state),
					pipe_config, true)) {
			crtc_state->mode_changed = false;
			pipe_config->update_pipe = true;
		}

		if (needs_modeset(crtc_state))
			any_ms = true;

		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
				       needs_modeset(crtc_state) ?
				       "[modeset]" : "[fastset]");
	}

	if (any_ms) {
		ret = intel_modeset_checks(state);

		if (ret)
			return ret;
	} else {
		/* No modeset: keep the currently committed logical cdclk. */
		intel_state->cdclk.logical = dev_priv->cdclk.logical;
	}

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	intel_fbc_choose_crtc(dev_priv, intel_state);
	return calc_watermark_data(state);
}
12379
/*
 * Driver hook for getting all planes ready ahead of commit; currently a
 * thin wrapper around the DRM atomic helper.
 */
static int intel_atomic_prepare_commit(struct drm_device *dev,
				       struct drm_atomic_state *state)
{
	return drm_atomic_helper_prepare_planes(dev, state);
}
12385
12386 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
12387 {
12388         struct drm_device *dev = crtc->base.dev;
12389
12390         if (!dev->max_vblank_count)
12391                 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
12392
12393         return dev->driver->get_vblank_counter(dev, crtc->pipe);
12394 }
12395
/*
 * Commit the new state of a single CRTC: full enable on a modeset,
 * pre-plane bookkeeping on a fastset, then FBC and the plane updates.
 */
static void intel_update_crtc(struct drm_crtc *crtc,
			      struct drm_atomic_state *state,
			      struct drm_crtc_state *old_crtc_state,
			      struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
	bool modeset = needs_modeset(new_crtc_state);
	/* New state of the primary plane, if it is part of this commit. */
	struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
						 to_intel_plane(crtc->primary));

	if (modeset) {
		update_scanline_offset(intel_crtc);
		dev_priv->display.crtc_enable(pipe_config, state);

		/* vblanks work again, re-enable pipe CRC. */
		intel_crtc_enable_pipe_crc(intel_crtc);
	} else {
		/* Fastset: no full enable, just the pre-plane updates. */
		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
				       pipe_config);
	}

	if (new_plane_state)
		intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);

	drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
}
12426
12427 static void intel_update_crtcs(struct drm_atomic_state *state)
12428 {
12429         struct drm_crtc *crtc;
12430         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12431         int i;
12432
12433         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12434                 if (!new_crtc_state->active)
12435                         continue;
12436
12437                 intel_update_crtc(crtc, state, old_crtc_state,
12438                                   new_crtc_state);
12439         }
12440 }
12441
/*
 * SKL+ variant of update_crtcs: commits CRTCs in an order that keeps any
 * pipe's new DDB allocation from overlapping another pipe's old one at
 * any point in time, and switches the second DBuf slice on/off on gen11+
 * as required by the new configuration.
 */
static void skl_update_crtcs(struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc_state *cstate;
	unsigned int updated = 0;
	bool progress;
	enum pipe pipe;
	int i;
	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
	u8 required_slices = intel_state->wm_results.ddb.enabled_slices;

	/* Currently claimed DDB allocations, indexed by crtc state index. */
	const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
		/* ignore allocations for crtc's that have been turned off. */
		if (new_crtc_state->active)
			entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;

	/* If 2nd DBuf slice required, enable it here */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other in between CRTC updates. Otherwise
	 * we'll cause pipe underruns and other bad stuff.
	 */
	do {
		progress = false;

		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			bool vbl_wait = false;
			unsigned int cmask = drm_crtc_mask(crtc);

			intel_crtc = to_intel_crtc(crtc);
			cstate = to_intel_crtc_state(new_crtc_state);
			pipe = intel_crtc->pipe;

			/* Skip pipes already updated or not active. */
			if (updated & cmask || !cstate->base.active)
				continue;

			/* Defer this pipe while its new DDB would overlap another. */
			if (skl_ddb_allocation_overlaps(dev_priv,
							entries,
							&cstate->wm.skl.ddb,
							i))
				continue;

			updated |= cmask;
			entries[i] = &cstate->wm.skl.ddb;

			/*
			 * If this is an already active pipe, its DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
						 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
			    !new_crtc_state->active_changed &&
			    intel_state->wm_results.dirty_pipes != updated)
				vbl_wait = true;

			intel_update_crtc(crtc, state, old_crtc_state,
					  new_crtc_state);

			if (vbl_wait)
				intel_wait_for_vblank(dev_priv, pipe);

			progress = true;
		}
	} while (progress);

	/* If 2nd DBuf slice is no more required disable it */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);
}
12523
12524 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
12525 {
12526         struct intel_atomic_state *state, *next;
12527         struct llist_node *freed;
12528
12529         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
12530         llist_for_each_entry_safe(state, next, freed, freed)
12531                 drm_atomic_state_put(&state->base);
12532 }
12533
12534 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
12535 {
12536         struct drm_i915_private *dev_priv =
12537                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
12538
12539         intel_atomic_helper_free_state(dev_priv);
12540 }
12541
/*
 * Sleep until the state's commit_ready fence has signalled, but also wake
 * and stop waiting as soon as a modeset-triggering GPU reset
 * (I915_RESET_MODESET) is flagged, whichever happens first.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/* Queue ourselves on both wait queues before re-checking. */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(&dev_priv->gpu_error.wait_queue,
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready)
		    || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
}
12565
12566 static void intel_atomic_cleanup_work(struct work_struct *work)
12567 {
12568         struct drm_atomic_state *state =
12569                 container_of(work, struct drm_atomic_state, commit_work);
12570         struct drm_i915_private *i915 = to_i915(state->dev);
12571
12572         drm_atomic_helper_cleanup_planes(&i915->drm, state);
12573         drm_atomic_helper_commit_cleanup_done(state);
12574         drm_atomic_state_put(state);
12575
12576         intel_atomic_helper_free_state(i915);
12577 }
12578
/*
 * Apply a swapped-in atomic state to the hardware: disable outgoing
 * pipes, reprogram global state (cdclk, SAGV), enable/update the
 * remaining pipes and planes, then hand state cleanup off to a worker.
 */
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	struct intel_crtc_state *intel_cstate;
	u64 put_domains[I915_MAX_PIPES] = {};
	int i;

	/* Wait for the commit fence (or a pending GPU reset) before touching hw. */
	intel_atomic_commit_fence_wait(intel_state);

	drm_atomic_helper_wait_for_dependencies(state);

	if (intel_state->modeset)
		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	/* First pass: grab power domains and disable pipes doing a full modeset. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		if (needs_modeset(new_crtc_state) ||
		    to_intel_crtc_state(new_crtc_state)->update_pipe) {

			put_domains[to_intel_crtc(crtc)->pipe] =
				modeset_get_crtc_power_domains(crtc,
					to_intel_crtc_state(new_crtc_state));
		}

		if (!needs_modeset(new_crtc_state))
			continue;

		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
				       to_intel_crtc_state(new_crtc_state));

		if (old_crtc_state->active) {
			intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);

			/*
			 * We need to disable pipe CRC before disabling the pipe,
			 * or we race against vblank off.
			 */
			intel_crtc_disable_pipe_crc(intel_crtc);

			dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
			intel_crtc->active = false;
			intel_fbc_disable(intel_crtc);
			intel_disable_shared_dpll(intel_crtc);

			/*
			 * Underruns don't always raise
			 * interrupts, so check manually.
			 */
			intel_check_cpu_fifo_underruns(dev_priv);
			intel_check_pch_fifo_underruns(dev_priv);

			if (!new_crtc_state->active) {
				/*
				 * Make sure we don't call initial_watermarks
				 * for ILK-style watermark updates.
				 *
				 * No clue what this is supposed to achieve.
				 */
				if (INTEL_GEN(dev_priv) >= 9)
					dev_priv->display.initial_watermarks(intel_state,
									     to_intel_crtc_state(new_crtc_state));
			}
		}
	}

	/* FIXME: Eventually get rid of our intel_crtc->config pointer */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
		to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);

	if (intel_state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);

		/* Program the new cdclk while the affected pipes are off. */
		intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);

		/*
		 * SKL workaround: bspec recommends we disable the SAGV when we
		 * have more than one pipe enabled
		 */
		if (!intel_can_enable_sagv(state))
			intel_disable_sagv(dev_priv);

		intel_modeset_verify_disabled(dev, state);
	}

	/* Complete the events for pipes that have now been disabled */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = needs_modeset(new_crtc_state);

		/* Complete events for now disabled pipes here. */
		if (modeset && !new_crtc_state->active && new_crtc_state->event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->event = NULL;
		}
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.update_crtcs(state);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously_
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, state);

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		intel_cstate = to_intel_crtc_state(new_crtc_state);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(intel_state,
							      intel_cstate);
	}

	/* Post-plane updates, power domain release and per-crtc verification. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(to_intel_crtc_state(old_crtc_state));

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
	}

	if (intel_state->modeset)
		intel_verify_planes(intel_state);

	/* Re-enable SAGV if the committed configuration allows it. */
	if (intel_state->modeset && intel_can_enable_sagv(state))
		intel_enable_sagv(dev_priv);

	drm_atomic_helper_commit_hw_done(state);

	if (intel_state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
	}

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->commit_work);
}
12750
12751 static void intel_atomic_commit_work(struct work_struct *work)
12752 {
12753         struct drm_atomic_state *state =
12754                 container_of(work, struct drm_atomic_state, commit_work);
12755
12756         intel_atomic_commit_tail(state);
12757 }
12758
12759 static int __i915_sw_fence_call
12760 intel_atomic_commit_ready(struct i915_sw_fence *fence,
12761                           enum i915_sw_fence_notify notify)
12762 {
12763         struct intel_atomic_state *state =
12764                 container_of(fence, struct intel_atomic_state, commit_ready);
12765
12766         switch (notify) {
12767         case FENCE_COMPLETE:
12768                 /* we do blocking waits in the worker, nothing to do here */
12769                 break;
12770         case FENCE_FREE:
12771                 {
12772                         struct intel_atomic_helper *helper =
12773                                 &to_i915(state->base.dev)->atomic_helper;
12774
12775                         if (llist_add(&state->freed, &helper->free_list))
12776                                 schedule_work(&helper->free_work);
12777                         break;
12778                 }
12779         }
12780
12781         return NOTIFY_DONE;
12782 }
12783
12784 static void intel_atomic_track_fbs(struct drm_atomic_state *state)
12785 {
12786         struct drm_plane_state *old_plane_state, *new_plane_state;
12787         struct drm_plane *plane;
12788         int i;
12789
12790         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
12791                 i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
12792                                   intel_fb_obj(new_plane_state->fb),
12793                                   to_intel_plane(plane)->frontbuffer_bit);
12794 }
12795
12796 /**
12797  * intel_atomic_commit - commit validated state object
12798  * @dev: DRM device
12799  * @state: the top-level driver state object
12800  * @nonblock: nonblocking commit
12801  *
12802  * This function commits a top-level state object that has been validated
12803  * with drm_atomic_helper_check().
12804  *
12805  * RETURNS
12806  * Zero for success or -errno.
12807  */
/*
 * intel_atomic_commit - commit a validated atomic state to the hardware
 * @dev: drm device
 * @state: atomic state to commit (must already have passed atomic_check)
 * @nonblock: whether this commit may complete asynchronously
 *
 * Sets up the commit machinery, swaps in the new software state and then
 * either runs the commit tail directly (blocking commit) or queues it on
 * a workqueue (nonblocking commit).
 *
 * Returns 0 on success, a negative error code on failure.
 */
static int intel_atomic_commit(struct drm_device *dev,
                               struct drm_atomic_state *state,
                               bool nonblock)
{
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;

        /* Reference held while commit_ready is live; presumably dropped by
         * the commit_ready fence teardown (see intel_atomic_commit_ready). */
        drm_atomic_state_get(state);
        i915_sw_fence_init(&intel_state->commit_ready,
                           intel_atomic_commit_ready);

        /*
         * The intel_legacy_cursor_update() fast path takes care
         * of avoiding the vblank waits for simple cursor
         * movement and flips. For cursor on/off and size changes,
         * we want to perform the vblank waits so that watermark
         * updates happen during the correct frames. Gen9+ have
         * double buffered watermarks and so shouldn't need this.
         *
         * Unset state->legacy_cursor_update before the call to
         * drm_atomic_helper_setup_commit() because otherwise
         * drm_atomic_helper_wait_for_flip_done() is a noop and
         * we get FIFO underruns because we didn't wait
         * for vblank.
         *
         * FIXME doing watermarks and fb cleanup from a vblank worker
         * (assuming we had any) would solve these problems.
         */
        if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
                struct intel_crtc_state *new_crtc_state;
                struct intel_crtc *crtc;
                int i;

                /* Any crtc needing a post-vblank watermark update forces
                 * the slow path. */
                for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
                        if (new_crtc_state->wm.need_postvbl_update ||
                            new_crtc_state->update_wm_post)
                                state->legacy_cursor_update = false;
        }

        ret = intel_atomic_prepare_commit(dev, state);
        if (ret) {
                DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
                /* Unblock the fence so the state can be torn down. */
                i915_sw_fence_commit(&intel_state->commit_ready);
                return ret;
        }

        ret = drm_atomic_helper_setup_commit(state, nonblock);
        if (!ret)
                ret = drm_atomic_helper_swap_state(state, true);

        if (ret) {
                i915_sw_fence_commit(&intel_state->commit_ready);

                drm_atomic_helper_cleanup_planes(dev, state);
                return ret;
        }
        /* State swap succeeded; the commit can no longer fail, so commit
         * the new bookkeeping into dev_priv. */
        dev_priv->wm.distrust_bios_wm = false;
        intel_shared_dpll_swap_state(state);
        intel_atomic_track_fbs(state);

        if (intel_state->modeset) {
                memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
                       sizeof(intel_state->min_cdclk));
                memcpy(dev_priv->min_voltage_level,
                       intel_state->min_voltage_level,
                       sizeof(intel_state->min_voltage_level));
                dev_priv->active_crtcs = intel_state->active_crtcs;
                dev_priv->cdclk.logical = intel_state->cdclk.logical;
                dev_priv->cdclk.actual = intel_state->cdclk.actual;
        }

        /* Second reference: consumed by the commit tail / commit work. */
        drm_atomic_state_get(state);
        INIT_WORK(&state->commit_work, intel_atomic_commit_work);

        i915_sw_fence_commit(&intel_state->commit_ready);
        if (nonblock && intel_state->modeset) {
                queue_work(dev_priv->modeset_wq, &state->commit_work);
        } else if (nonblock) {
                queue_work(system_unbound_wq, &state->commit_work);
        } else {
                /* A blocking modeset must not overtake nonblocking
                 * modesets still queued on the dedicated workqueue. */
                if (intel_state->modeset)
                        flush_workqueue(dev_priv->modeset_wq);
                intel_atomic_commit_tail(state);
        }

        return 0;
}
12896
/* CRTC vfuncs: the legacy entry points (set_config, page_flip, gamma) are
 * all routed through the generic atomic helpers. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
        .gamma_set = drm_atomic_helper_legacy_gamma_set,
        .set_config = drm_atomic_helper_set_config,
        .destroy = intel_crtc_destroy,
        .page_flip = drm_atomic_helper_page_flip,
        .atomic_duplicate_state = intel_crtc_duplicate_state,
        .atomic_destroy_state = intel_crtc_destroy_state,
        .set_crc_source = intel_crtc_set_crc_source,
        .verify_crc_source = intel_crtc_verify_crc_source,
        .get_crc_sources = intel_crtc_get_crc_sources,
};
12908
/*
 * Deferred RPS boost: queued on a crtc's vblank waitqueue so that when the
 * vblank fires the GPU frequency can be bumped if the tracked request has
 * not started executing yet (see do_rps_boost()).
 */
struct wait_rps_boost {
        /* entry hooked into the crtc's vblank waitqueue */
        struct wait_queue_entry wait;

        /* crtc whose vblank we are waiting on (vblank ref held) */
        struct drm_crtc *crtc;
        /* request that may get boosted (reference held) */
        struct i915_request *request;
};
12915
/*
 * Vblank waitqueue callback: boost the GPU frequency if the request that
 * was outstanding at flip time has not even started running by the time
 * the vblank arrives.  One-shot; releases all references and frees itself.
 */
static int do_rps_boost(struct wait_queue_entry *_wait,
                        unsigned mode, int sync, void *key)
{
        struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
        struct i915_request *rq = wait->request;

        /*
         * If we missed the vblank, but the request is already running it
         * is reasonable to assume that it will complete before the next
         * vblank without our intervention, so leave RPS alone.
         */
        if (!i915_request_started(rq))
                gen6_rps_boost(rq, NULL);
        /* Drop the reference taken in add_rps_boost_after_vblank(). */
        i915_request_put(rq);

        /* Balance the vblank reference taken when queueing. */
        drm_crtc_vblank_put(wait->crtc);

        /* One-shot: detach from the waitqueue and free ourselves. */
        list_del(&wait->wait.entry);
        kfree(wait);
        return 1;
}
12937
12938 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
12939                                        struct dma_fence *fence)
12940 {
12941         struct wait_rps_boost *wait;
12942
12943         if (!dma_fence_is_i915(fence))
12944                 return;
12945
12946         if (INTEL_GEN(to_i915(crtc->dev)) < 6)
12947                 return;
12948
12949         if (drm_crtc_vblank_get(crtc))
12950                 return;
12951
12952         wait = kmalloc(sizeof(*wait), GFP_KERNEL);
12953         if (!wait) {
12954                 drm_crtc_vblank_put(crtc);
12955                 return;
12956         }
12957
12958         wait->request = to_request(dma_fence_get(fence));
12959         wait->crtc = crtc;
12960
12961         wait->wait.func = do_rps_boost;
12962         wait->wait.flags = 0;
12963
12964         add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
12965 }
12966
/*
 * Pin (and fence, where applicable) the framebuffer of @plane_state for
 * scanout, storing the resulting vma in the plane state.  Cursor planes on
 * platforms that require a physical address get the backing object attached
 * to physically contiguous memory first.
 *
 * Returns 0 on success, a negative error code on failure.
 */
static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
        struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        struct drm_framebuffer *fb = plane_state->base.fb;
        struct i915_vma *vma;

        if (plane->id == PLANE_CURSOR &&
            INTEL_INFO(dev_priv)->cursor_needs_physical) {
                struct drm_i915_gem_object *obj = intel_fb_obj(fb);
                const int align = intel_cursor_alignment(dev_priv);
                int err;

                err = i915_gem_object_attach_phys(obj, align);
                if (err)
                        return err;
        }

        vma = intel_pin_and_fence_fb_obj(fb,
                                         plane_state->base.rotation,
                                         intel_plane_uses_fence(plane_state),
                                         &plane_state->flags);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        /* Released again by intel_plane_unpin_fb(). */
        plane_state->vma = vma;

        return 0;
}
12996
12997 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
12998 {
12999         struct i915_vma *vma;
13000
13001         vma = fetch_and_zero(&old_plane_state->vma);
13002         if (vma)
13003                 intel_unpin_fb_vma(vma, old_plane_state->flags);
13004 }
13005
13006 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
13007 {
13008         struct i915_sched_attr attr = {
13009                 .priority = I915_PRIORITY_DISPLAY,
13010         };
13011
13012         i915_gem_object_wait_priority(obj, 0, &attr);
13013 }
13014
/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @new_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane.  Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits.  Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Must be called with struct_mutex held.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
                       struct drm_plane_state *new_state)
{
        struct intel_atomic_state *intel_state =
                to_intel_atomic_state(new_state->state);
        struct drm_i915_private *dev_priv = to_i915(plane->dev);
        struct drm_framebuffer *fb = new_state->fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
        int ret;

        if (old_obj) {
                struct drm_crtc_state *crtc_state =
                        drm_atomic_get_new_crtc_state(new_state->state,
                                                      plane->state->crtc);

                /* Big Hammer, we also need to ensure that any pending
                 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
                 * current scanout is retired before unpinning the old
                 * framebuffer. Note that we rely on userspace rendering
                 * into the buffer attached to the pipe they are waiting
                 * on. If not, userspace generates a GPU hang with IPEHR
                 * point to the MI_WAIT_FOR_EVENT.
                 *
                 * This should only fail upon a hung GPU, in which case we
                 * can safely continue.
                 */
                if (needs_modeset(crtc_state)) {
                        ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
                                                              old_obj->resv, NULL,
                                                              false, 0,
                                                              GFP_KERNEL);
                        if (ret < 0)
                                return ret;
                }
        }

        if (new_state->fence) { /* explicit fencing */
                ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
                                                    new_state->fence,
                                                    I915_FENCE_TIMEOUT,
                                                    GFP_KERNEL);
                if (ret < 0)
                        return ret;
        }

        /* Plane being disabled (no fb) — nothing left to prepare. */
        if (!obj)
                return 0;

        /* Pin the backing pages before taking struct_mutex. */
        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                return ret;

        ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
        if (ret) {
                i915_gem_object_unpin_pages(obj);
                return ret;
        }

        ret = intel_plane_pin_fb(to_intel_plane_state(new_state));

        fb_obj_bump_render_priority(obj);

        mutex_unlock(&dev_priv->drm.struct_mutex);
        i915_gem_object_unpin_pages(obj);
        if (ret)
                return ret;

        intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

        if (!new_state->fence) { /* implicit fencing */
                struct dma_fence *fence;

                /* NOTE(review): if this fails the fb pinned above stays
                 * pinned — confirm the caller's error path unpins it,
                 * since intel_cleanup_plane_fb() documents it only runs
                 * after a *successful* prepare. */
                ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
                                                      obj->resv, NULL,
                                                      false, I915_FENCE_TIMEOUT,
                                                      GFP_KERNEL);
                if (ret < 0)
                        return ret;

                fence = reservation_object_get_excl_rcu(obj->resv);
                if (fence) {
                        add_rps_boost_after_vblank(new_state->crtc, fence);
                        dma_fence_put(fence);
                }
        } else {
                add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
        }

        /*
         * We declare pageflips to be interactive and so merit a small bias
         * towards upclocking to deliver the frame on time. By only changing
         * the RPS thresholds to sample more regularly and aim for higher
         * clocks we can hopefully deliver low power workloads (like kodi)
         * that are not quite steady state without resorting to forcing
         * maximum clocks following a vblank miss (see do_rps_boost()).
         */
        if (!intel_state->rps_interactive) {
                intel_rps_mark_interactive(dev_priv, true);
                intel_state->rps_interactive = true;
        }

        return 0;
}
13134
13135 /**
13136  * intel_cleanup_plane_fb - Cleans up an fb after plane use
13137  * @plane: drm plane to clean up for
13138  * @old_state: the state from the previous modeset
13139  *
13140  * Cleans up a framebuffer that has just been removed from a plane.
13141  *
13142  * Must be called with struct_mutex held.
13143  */
13144 void
13145 intel_cleanup_plane_fb(struct drm_plane *plane,
13146                        struct drm_plane_state *old_state)
13147 {
13148         struct intel_atomic_state *intel_state =
13149                 to_intel_atomic_state(old_state->state);
13150         struct drm_i915_private *dev_priv = to_i915(plane->dev);
13151
13152         if (intel_state->rps_interactive) {
13153                 intel_rps_mark_interactive(dev_priv, false);
13154                 intel_state->rps_interactive = false;
13155         }
13156
13157         /* Should only be called after a successful intel_prepare_plane_fb()! */
13158         mutex_lock(&dev_priv->drm.struct_mutex);
13159         intel_plane_unpin_fb(to_intel_plane_state(old_state));
13160         mutex_unlock(&dev_priv->drm.struct_mutex);
13161 }
13162
13163 int
13164 skl_max_scale(struct intel_crtc *intel_crtc,
13165               struct intel_crtc_state *crtc_state,
13166               uint32_t pixel_format)
13167 {
13168         struct drm_i915_private *dev_priv;
13169         int max_scale, mult;
13170         int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
13171
13172         if (!intel_crtc || !crtc_state->base.enable)
13173                 return DRM_PLANE_HELPER_NO_SCALING;
13174
13175         dev_priv = to_i915(intel_crtc->base.dev);
13176
13177         crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13178         max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
13179
13180         if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
13181                 max_dotclk *= 2;
13182
13183         if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
13184                 return DRM_PLANE_HELPER_NO_SCALING;
13185
13186         /*
13187          * skl max scale is lower of:
13188          *    close to 3 but not 3, -1 is for that purpose
13189          *            or
13190          *    cdclk/crtc_clock
13191          */
13192         mult = pixel_format == DRM_FORMAT_NV12 ? 2 : 3;
13193         tmpclk1 = (1 << 16) * mult - 1;
13194         tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
13195         max_scale = min(tmpclk1, tmpclk2);
13196
13197         return max_scale;
13198 }
13199
/*
 * Validate @state for a primary plane against @crtc_state: position and
 * scaling limits via the atomic helper, then platform-specific surface
 * layout checks, and finally precompute the plane control register values
 * used at commit time.
 */
static int
intel_check_primary_plane(struct intel_crtc_state *crtc_state,
                          struct intel_plane_state *state)
{
        struct intel_plane *plane = to_intel_plane(state->base.plane);
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        struct drm_crtc *crtc = state->base.crtc;
        int min_scale = DRM_PLANE_HELPER_NO_SCALING;
        int max_scale = DRM_PLANE_HELPER_NO_SCALING;
        bool can_position = false;
        int ret;
        uint32_t pixel_format = 0;

        if (INTEL_GEN(dev_priv) >= 9) {
                /* use scaler when colorkey is not required */
                if (!state->ckey.flags) {
                        min_scale = 1;
                        if (state->base.fb)
                                pixel_format = state->base.fb->format->format;
                        max_scale = skl_max_scale(to_intel_crtc(crtc),
                                                  crtc_state, pixel_format);
                }
                /* gen9+ universal planes can be freely positioned */
                can_position = true;
        }

        ret = drm_atomic_helper_check_plane_state(&state->base,
                                                  &crtc_state->base,
                                                  min_scale, max_scale,
                                                  can_position, true);
        if (ret)
                return ret;

        /* Plane being disabled — nothing further to validate. */
        if (!state->base.fb)
                return 0;

        if (INTEL_GEN(dev_priv) >= 9) {
                ret = skl_check_plane_surface(crtc_state, state);
                if (ret)
                        return ret;

                /* Precompute PLANE_CTL for the commit phase. */
                state->ctl = skl_plane_ctl(crtc_state, state);
        } else {
                ret = i9xx_check_plane_surface(state);
                if (ret)
                        return ret;

                state->ctl = i9xx_plane_ctl(crtc_state, state);
        }

        /* GLK and gen10+ have an additional plane color control register. */
        if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
                state->color_ctl = glk_plane_color_ctl(crtc_state, state);

        return 0;
}
13254
/*
 * Hook called right before the planes of @crtc are committed: program
 * color management for fastsets, enter the vblank-evasion critical
 * section and kick off atomic watermark programming.
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
                                    struct drm_crtc_state *old_crtc_state)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *old_intel_cstate =
                to_intel_crtc_state(old_crtc_state);
        struct intel_atomic_state *old_intel_state =
                to_intel_atomic_state(old_crtc_state->state);
        struct intel_crtc_state *intel_cstate =
                intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
        bool modeset = needs_modeset(&intel_cstate->base);

        /* On a full modeset the LUTs/CSC are programmed elsewhere in the
         * enable sequence; only handle them here for fastsets. */
        if (!modeset &&
            (intel_cstate->base.color_mgmt_changed ||
             intel_cstate->update_pipe)) {
                intel_color_set_csc(&intel_cstate->base);
                intel_color_load_luts(&intel_cstate->base);
        }

        /* Perform vblank evasion around commit operation */
        intel_pipe_update_start(intel_cstate);

        if (modeset)
                goto out;

        if (intel_cstate->update_pipe)
                intel_update_pipe_config(old_intel_cstate, intel_cstate);
        else if (INTEL_GEN(dev_priv) >= 9)
                skl_detach_scalers(intel_crtc);

out:
        if (dev_priv->display.atomic_update_watermarks)
                dev_priv->display.atomic_update_watermarks(old_intel_state,
                                                           intel_cstate);
}
13292
13293 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
13294                                   struct intel_crtc_state *crtc_state)
13295 {
13296         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13297
13298         if (!IS_GEN2(dev_priv))
13299                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
13300
13301         if (crtc_state->has_pch_encoder) {
13302                 enum pipe pch_transcoder =
13303                         intel_crtc_pch_transcoder(crtc);
13304
13305                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
13306         }
13307 }
13308
13309 static void intel_finish_crtc_commit(struct drm_crtc *crtc,
13310                                      struct drm_crtc_state *old_crtc_state)
13311 {
13312         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13313         struct intel_atomic_state *old_intel_state =
13314                 to_intel_atomic_state(old_crtc_state->state);
13315         struct intel_crtc_state *new_crtc_state =
13316                 intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
13317
13318         intel_pipe_update_end(new_crtc_state);
13319
13320         if (new_crtc_state->update_pipe &&
13321             !needs_modeset(&new_crtc_state->base) &&
13322             old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED)
13323                 intel_crtc_arm_fifo_underrun(intel_crtc, new_crtc_state);
13324 }
13325
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
        struct intel_plane *intel_plane = to_intel_plane(plane);

        drm_plane_cleanup(plane);
        kfree(intel_plane);
}
13338
13339 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
13340                                             u32 format, u64 modifier)
13341 {
13342         switch (modifier) {
13343         case DRM_FORMAT_MOD_LINEAR:
13344         case I915_FORMAT_MOD_X_TILED:
13345                 break;
13346         default:
13347                 return false;
13348         }
13349
13350         switch (format) {
13351         case DRM_FORMAT_C8:
13352         case DRM_FORMAT_RGB565:
13353         case DRM_FORMAT_XRGB1555:
13354         case DRM_FORMAT_XRGB8888:
13355                 return modifier == DRM_FORMAT_MOD_LINEAR ||
13356                         modifier == I915_FORMAT_MOD_X_TILED;
13357         default:
13358                 return false;
13359         }
13360 }
13361
13362 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
13363                                             u32 format, u64 modifier)
13364 {
13365         switch (modifier) {
13366         case DRM_FORMAT_MOD_LINEAR:
13367         case I915_FORMAT_MOD_X_TILED:
13368                 break;
13369         default:
13370                 return false;
13371         }
13372
13373         switch (format) {
13374         case DRM_FORMAT_C8:
13375         case DRM_FORMAT_RGB565:
13376         case DRM_FORMAT_XRGB8888:
13377         case DRM_FORMAT_XBGR8888:
13378         case DRM_FORMAT_XRGB2101010:
13379         case DRM_FORMAT_XBGR2101010:
13380                 return modifier == DRM_FORMAT_MOD_LINEAR ||
13381                         modifier == I915_FORMAT_MOD_X_TILED;
13382         default:
13383                 return false;
13384         }
13385 }
13386
/*
 * .format_mod_supported for gen9+ universal planes.  The format switch is
 * ordered from most- to least-capable formats: each group accepts its
 * extra modifiers and then deliberately falls through to the common
 * modifiers handled further down.
 */
static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
                                           u32 format, u64 modifier)
{
        struct intel_plane *plane = to_intel_plane(_plane);

        switch (modifier) {
        case DRM_FORMAT_MOD_LINEAR:
        case I915_FORMAT_MOD_X_TILED:
        case I915_FORMAT_MOD_Y_TILED:
        case I915_FORMAT_MOD_Yf_TILED:
                break;
        case I915_FORMAT_MOD_Y_TILED_CCS:
        case I915_FORMAT_MOD_Yf_TILED_CCS:
                /* Render compression is a per-plane capability. */
                if (!plane->has_ccs)
                        return false;
                break;
        default:
                return false;
        }

        switch (format) {
        /* 8888 formats additionally support the CCS modifiers ... */
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_ARGB8888:
        case DRM_FORMAT_ABGR8888:
                if (is_ccs_modifier(modifier))
                        return true;
                /* fall through */
        /* ... these additionally support Yf tiling ... */
        case DRM_FORMAT_RGB565:
        case DRM_FORMAT_XRGB2101010:
        case DRM_FORMAT_XBGR2101010:
        case DRM_FORMAT_YUYV:
        case DRM_FORMAT_YVYU:
        case DRM_FORMAT_UYVY:
        case DRM_FORMAT_VYUY:
        case DRM_FORMAT_NV12:
                if (modifier == I915_FORMAT_MOD_Yf_TILED)
                        return true;
                /* fall through */
        /* ... and everything above plus C8 supports linear/X/Y tiling. */
        case DRM_FORMAT_C8:
                if (modifier == DRM_FORMAT_MOD_LINEAR ||
                    modifier == I915_FORMAT_MOD_X_TILED ||
                    modifier == I915_FORMAT_MOD_Y_TILED)
                        return true;
                /* fall through */
        default:
                return false;
        }
}
13436
13437 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
13438                                               u32 format, u64 modifier)
13439 {
13440         return modifier == DRM_FORMAT_MOD_LINEAR &&
13441                 format == DRM_FORMAT_ARGB8888;
13442 }
13443
/*
 * drm_plane_funcs for gen9+ universal planes.
 * NOTE(review): looks like this could be "static const", matching
 * intel_cursor_plane_funcs — confirm nothing writes to it at runtime.
 */
static struct drm_plane_funcs skl_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
        .atomic_get_property = intel_plane_atomic_get_property,
        .atomic_set_property = intel_plane_atomic_set_property,
        .atomic_duplicate_state = intel_plane_duplicate_state,
        .atomic_destroy_state = intel_plane_destroy_state,
        .format_mod_supported = skl_plane_format_mod_supported,
};
13454
/*
 * drm_plane_funcs for gen4+ (pre-gen9) primary planes.
 * NOTE(review): looks like this could be "static const", matching
 * intel_cursor_plane_funcs — confirm nothing writes to it at runtime.
 */
static struct drm_plane_funcs i965_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
        .atomic_get_property = intel_plane_atomic_get_property,
        .atomic_set_property = intel_plane_atomic_set_property,
        .atomic_duplicate_state = intel_plane_duplicate_state,
        .atomic_destroy_state = intel_plane_destroy_state,
        .format_mod_supported = i965_plane_format_mod_supported,
};
13465
/*
 * drm_plane_funcs for pre-gen4 primary planes.
 * NOTE(review): looks like this could be "static const", matching
 * intel_cursor_plane_funcs — confirm nothing writes to it at runtime.
 */
static struct drm_plane_funcs i8xx_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
        .atomic_get_property = intel_plane_atomic_get_property,
        .atomic_set_property = intel_plane_atomic_set_property,
        .atomic_duplicate_state = intel_plane_duplicate_state,
        .atomic_destroy_state = intel_plane_destroy_state,
        .format_mod_supported = i8xx_plane_format_mod_supported,
};
13476
/*
 * Legacy cursor fastpath: update the cursor plane without going through a
 * full atomic commit, so that cursor movement is not throttled by vblank
 * waits.  Falls back to drm_atomic_helper_update_plane() (the slowpath)
 * whenever the update could affect watermarks, a modeset/fastset is
 * pending, or a prior commit on the plane has not finished.
 */
static int
intel_legacy_cursor_update(struct drm_plane *plane,
                           struct drm_crtc *crtc,
                           struct drm_framebuffer *fb,
                           int crtc_x, int crtc_y,
                           unsigned int crtc_w, unsigned int crtc_h,
                           uint32_t src_x, uint32_t src_y,
                           uint32_t src_w, uint32_t src_h,
                           struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        int ret;
        struct drm_plane_state *old_plane_state, *new_plane_state;
        struct intel_plane *intel_plane = to_intel_plane(plane);
        struct drm_framebuffer *old_fb;
        struct drm_crtc_state *crtc_state = crtc->state;

        /*
         * When crtc is inactive or there is a modeset pending,
         * wait for it to complete in the slowpath
         */
        if (!crtc_state->active || needs_modeset(crtc_state) ||
            to_intel_crtc_state(crtc_state)->update_pipe)
                goto slow;

        old_plane_state = plane->state;
        /*
         * Don't do an async update if there is an outstanding commit modifying
         * the plane.  This prevents our async update's changes from getting
         * overridden by a previous synchronous update's state.
         */
        if (old_plane_state->commit &&
            !try_wait_for_completion(&old_plane_state->commit->hw_done))
                goto slow;

        /*
         * If any parameters change that may affect watermarks,
         * take the slowpath. Only changing fb or position should be
         * in the fastpath.
         */
        if (old_plane_state->crtc != crtc ||
            old_plane_state->src_w != src_w ||
            old_plane_state->src_h != src_h ||
            old_plane_state->crtc_w != crtc_w ||
            old_plane_state->crtc_h != crtc_h ||
            !old_plane_state->fb != !fb)
                goto slow;

        new_plane_state = intel_plane_duplicate_state(plane);
        if (!new_plane_state)
                return -ENOMEM;

        drm_atomic_set_fb_for_plane(new_plane_state, fb);

        new_plane_state->src_x = src_x;
        new_plane_state->src_y = src_y;
        new_plane_state->src_w = src_w;
        new_plane_state->src_h = src_h;
        new_plane_state->crtc_x = crtc_x;
        new_plane_state->crtc_y = crtc_y;
        new_plane_state->crtc_w = crtc_w;
        new_plane_state->crtc_h = crtc_h;

        ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state),
                                                  to_intel_crtc_state(crtc->state), /* FIXME need a new crtc state? */
                                                  to_intel_plane_state(plane->state),
                                                  to_intel_plane_state(new_plane_state));
        if (ret)
                goto out_free;

        ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
        if (ret)
                goto out_free;

        ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
        if (ret)
                goto out_unlock;

        intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);

        /* Move the frontbuffer tracking bit from the old fb to the new. */
        old_fb = old_plane_state->fb;
        i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
                          intel_plane->frontbuffer_bit);

        /* Swap plane state */
        plane->state = new_plane_state;

        if (plane->state->visible) {
                trace_intel_update_plane(plane, to_intel_crtc(crtc));
                intel_plane->update_plane(intel_plane,
                                          to_intel_crtc_state(crtc->state),
                                          to_intel_plane_state(plane->state));
        } else {
                trace_intel_disable_plane(plane, to_intel_crtc(crtc));
                intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
        }

        intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));

out_unlock:
        mutex_unlock(&dev_priv->drm.struct_mutex);
out_free:
        /* On failure free the never-installed new state; on success the
         * old state that has just been replaced. */
        if (ret)
                intel_plane_destroy_state(plane, new_plane_state);
        else
                intel_plane_destroy_state(plane, old_plane_state);
        return ret;

slow:
        return drm_atomic_helper_update_plane(plane, crtc, fb,
                                              crtc_x, crtc_y, crtc_w, crtc_h,
                                              src_x, src_y, src_w, src_h, ctx);
}
13590
/* drm_plane_funcs for cursor planes; .update_plane is the legacy cursor
 * fastpath rather than the plain atomic helper. */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
        .update_plane = intel_legacy_cursor_update,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
        .atomic_get_property = intel_plane_atomic_get_property,
        .atomic_set_property = intel_plane_atomic_set_property,
        .atomic_duplicate_state = intel_plane_duplicate_state,
        .atomic_destroy_state = intel_plane_destroy_state,
        .format_mod_supported = intel_cursor_format_mod_supported,
};
13601
13602 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
13603                                enum i9xx_plane_id i9xx_plane)
13604 {
13605         if (!HAS_FBC(dev_priv))
13606                 return false;
13607
13608         if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
13609                 return i9xx_plane == PLANE_A; /* tied to pipe A */
13610         else if (IS_IVYBRIDGE(dev_priv))
13611                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
13612                         i9xx_plane == PLANE_C;
13613         else if (INTEL_GEN(dev_priv) >= 4)
13614                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
13615         else
13616                 return i9xx_plane == PLANE_A;
13617 }
13618
13619 static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
13620                               enum pipe pipe, enum plane_id plane_id)
13621 {
13622         if (!HAS_FBC(dev_priv))
13623                 return false;
13624
13625         return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
13626 }
13627
13628 bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
13629                           enum pipe pipe, enum plane_id plane_id)
13630 {
13631         /*
13632          * FIXME: ICL requires two hardware planes for scanning out NV12
13633          * framebuffers. Do not advertize support until this is implemented.
13634          */
13635         if (INTEL_GEN(dev_priv) >= 11)
13636                 return false;
13637
13638         if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
13639                 return false;
13640
13641         if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
13642                 return false;
13643
13644         if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
13645                 return false;
13646
13647         return true;
13648 }
13649
/*
 * Allocate and register the primary (main scanout) plane for @pipe.
 *
 * Selects the per-generation plane vfuncs, supported pixel formats,
 * framebuffer modifiers and the rotation/color properties, then registers
 * the plane with the DRM core via drm_universal_plane_init().
 *
 * Returns the new plane, or an ERR_PTR() on allocation/registration failure.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary = NULL;
	struct intel_plane_state *state = NULL;
	const struct drm_plane_funcs *plane_funcs;
	const uint32_t *intel_primary_formats;
	unsigned int supported_rotations;
	unsigned int num_formats;
	const uint64_t *modifiers;
	int ret;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (!primary) {
		ret = -ENOMEM;
		goto fail;
	}

	state = intel_create_plane_state(&primary->base);
	if (!state) {
		ret = -ENOMEM;
		goto fail;
	}

	primary->base.state = &state->base;

	/* Only skl+ primary planes can scale; scaler unassigned initially. */
	primary->can_scale = false;
	primary->max_downscale = 1;
	if (INTEL_GEN(dev_priv) >= 9) {
		primary->can_scale = true;
		state->scaler_id = -1;
	}
	primary->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		primary->i9xx_plane = (enum i9xx_plane_id) !pipe;
	else
		primary->i9xx_plane = (enum i9xx_plane_id) pipe;
	primary->id = PLANE_PRIMARY;
	primary->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, primary->id);

	if (INTEL_GEN(dev_priv) >= 9)
		primary->has_fbc = skl_plane_has_fbc(dev_priv,
						     primary->pipe,
						     primary->id);
	else
		primary->has_fbc = i9xx_plane_has_fbc(dev_priv,
						      primary->i9xx_plane);

	/* Let FBC know this plane's frontbuffer bit may need tracking. */
	if (primary->has_fbc) {
		struct intel_fbc *fbc = &dev_priv->fbc;

		fbc->possible_framebuffer_bits |= primary->frontbuffer_bit;
	}

	primary->check_plane = intel_check_primary_plane;

	/* Pick formats, modifiers and hw vfuncs per generation. */
	if (INTEL_GEN(dev_priv) >= 9) {
		primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
						     PLANE_PRIMARY);

		if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) {
			intel_primary_formats = skl_pri_planar_formats;
			num_formats = ARRAY_SIZE(skl_pri_planar_formats);
		} else {
			intel_primary_formats = skl_primary_formats;
			num_formats = ARRAY_SIZE(skl_primary_formats);
		}

		if (primary->has_ccs)
			modifiers = skl_format_modifiers_ccs;
		else
			modifiers = skl_format_modifiers_noccs;

		primary->update_plane = skl_update_plane;
		primary->disable_plane = skl_disable_plane;
		primary->get_hw_state = skl_plane_get_hw_state;

		plane_funcs = &skl_plane_funcs;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);
		modifiers = i9xx_format_modifiers;

		primary->update_plane = i9xx_update_plane;
		primary->disable_plane = i9xx_disable_plane;
		primary->get_hw_state = i9xx_plane_get_hw_state;

		plane_funcs = &i965_plane_funcs;
	} else {
		intel_primary_formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
		modifiers = i9xx_format_modifiers;

		primary->update_plane = i9xx_update_plane;
		primary->disable_plane = i9xx_disable_plane;
		primary->get_hw_state = i9xx_plane_get_hw_state;

		plane_funcs = &i8xx_plane_funcs;
	}

	/* The userspace-visible plane name differs per generation. */
	if (INTEL_GEN(dev_priv) >= 9)
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, plane_funcs,
					       intel_primary_formats, num_formats,
					       modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane 1%c", pipe_name(pipe));
	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, plane_funcs,
					       intel_primary_formats, num_formats,
					       modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, plane_funcs,
					       intel_primary_formats, num_formats,
					       modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c",
					       plane_name(primary->i9xx_plane));
	if (ret)
		goto fail;

	/* Rotation/reflection capabilities grow with the hw generation. */
	if (INTEL_GEN(dev_priv) >= 10) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
			DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 9) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
			DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
	} else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
	} else {
		supported_rotations = DRM_MODE_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&primary->base,
						   DRM_MODE_ROTATE_0,
						   supported_rotations);

	if (INTEL_GEN(dev_priv) >= 9)
		drm_plane_create_color_properties(&primary->base,
						  BIT(DRM_COLOR_YCBCR_BT601) |
						  BIT(DRM_COLOR_YCBCR_BT709),
						  BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
						  BIT(DRM_COLOR_YCBCR_FULL_RANGE),
						  DRM_COLOR_YCBCR_BT709,
						  DRM_COLOR_YCBCR_LIMITED_RANGE);

	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

	return primary;

fail:
	/* kfree(NULL) is a no-op, so partial allocation is handled too. */
	kfree(state);
	kfree(primary);

	return ERR_PTR(ret);
}
13823
/*
 * Allocate and register the cursor plane for @pipe.
 *
 * i845/i865 have their own cursor programming scheme; everything else
 * uses the i9xx cursor vfuncs. Returns the new plane or an ERR_PTR().
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	struct intel_plane *cursor = NULL;
	struct intel_plane_state *state = NULL;
	int ret;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (!cursor) {
		ret = -ENOMEM;
		goto fail;
	}

	state = intel_create_plane_state(&cursor->base);
	if (!state) {
		ret = -ENOMEM;
		goto fail;
	}

	cursor->base.state = &state->base;

	/* Cursor planes can never scale. */
	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	/* ~0 = "unknown", forcing a full reprogram on the first update. */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       0, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	/* skl+: no pipe scaler assigned to the cursor. */
	if (INTEL_GEN(dev_priv) >= 9)
		state->scaler_id = -1;

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	/* kfree(NULL) is a no-op, so partial allocation is handled too. */
	kfree(state);
	kfree(cursor);

	return ERR_PTR(ret);
}
13900
13901 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
13902                                     struct intel_crtc_state *crtc_state)
13903 {
13904         struct intel_crtc_scaler_state *scaler_state =
13905                 &crtc_state->scaler_state;
13906         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13907         int i;
13908
13909         crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe];
13910         if (!crtc->num_scalers)
13911                 return;
13912
13913         for (i = 0; i < crtc->num_scalers; i++) {
13914                 struct intel_scaler *scaler = &scaler_state->scalers[i];
13915
13916                 scaler->in_use = 0;
13917                 scaler->mode = PS_SCALER_MODE_DYN;
13918         }
13919
13920         scaler_state->scaler_id = -1;
13921 }
13922
/*
 * Create and register the crtc for @pipe along with its primary, sprite
 * and cursor planes, and set up the pipe/plane -> crtc lookup tables.
 *
 * Returns 0 on success or a negative errno. On failure only the crtc and
 * its state are freed here; any planes already registered are cleaned up
 * later by drm_mode_config_cleanup() (see the comment at the fail label).
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct intel_plane *primary = NULL;
	struct intel_plane *cursor = NULL;
	int sprite, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (!intel_crtc)
		return -ENOMEM;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state) {
		ret = -ENOMEM;
		goto fail;
	}
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(primary->id);

	/* One sprite/overlay plane per hw sprite available on this pipe. */
	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		intel_crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(cursor->id);

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
					&primary->base, &cursor->base,
					&intel_crtc_funcs,
					"pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	intel_crtc->pipe = pipe;

	/* initialize shared scalers */
	intel_crtc_init_scalers(intel_crtc, crtc_state);

	/* Each pipe must map to exactly one crtc. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;

	/* Pre-skl also keeps a legacy plane -> crtc mapping. */
	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_color_init(&intel_crtc->base);

	/* The driver relies on crtc index == pipe throughout. */
	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

	return 0;

fail:
	/*
	 * drm_mode_config_cleanup() will free up any
	 * crtcs/planes already initialized.
	 */
	kfree(crtc_state);
	kfree(intel_crtc);

	return ret;
}
14011
14012 enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
14013 {
14014         struct drm_device *dev = connector->base.dev;
14015
14016         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
14017
14018         if (!connector->base.state->crtc)
14019                 return INVALID_PIPE;
14020
14021         return to_intel_crtc(connector->base.state->crtc)->pipe;
14022 }
14023
14024 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
14025                                       struct drm_file *file)
14026 {
14027         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14028         struct drm_crtc *drmmode_crtc;
14029         struct intel_crtc *crtc;
14030
14031         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
14032         if (!drmmode_crtc)
14033                 return -ENOENT;
14034
14035         crtc = to_intel_crtc(drmmode_crtc);
14036         pipe_from_crtc_id->pipe = crtc->pipe;
14037
14038         return 0;
14039 }
14040
14041 static int intel_encoder_clones(struct intel_encoder *encoder)
14042 {
14043         struct drm_device *dev = encoder->base.dev;
14044         struct intel_encoder *source_encoder;
14045         int index_mask = 0;
14046         int entry = 0;
14047
14048         for_each_intel_encoder(dev, source_encoder) {
14049                 if (encoders_cloneable(encoder, source_encoder))
14050                         index_mask |= (1 << entry);
14051
14052                 entry++;
14053         }
14054
14055         return index_mask;
14056 }
14057
14058 static bool has_edp_a(struct drm_i915_private *dev_priv)
14059 {
14060         if (!IS_MOBILE(dev_priv))
14061                 return false;
14062
14063         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14064                 return false;
14065
14066         if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14067                 return false;
14068
14069         return true;
14070 }
14071
/* Does this platform have a usable analog CRT (VGA) output? */
static bool intel_crt_present(struct drm_i915_private *dev_priv)
{
	/* skl+ dropped the analog output entirely. */
	if (INTEL_GEN(dev_priv) >= 9)
		return false;

	/* ULT parts don't wire up CRT. */
	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		return false;

	if (IS_CHERRYVIEW(dev_priv))
		return false;

	/* LPT-H can fuse the CRT DAC off. */
	if (HAS_PCH_LPT_H(dev_priv) &&
	    I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	/* Finally, the VBT must declare an integrated CRT connector. */
	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}
14096
/*
 * Unlock the panel power sequencer registers.
 *
 * Writes the unlock key into each PP_CONTROL register so subsequent PPS
 * programming isn't silently ignored by write-protected registers.
 */
void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
{
	int pps_num;
	int pps_idx;

	/* DDI platforms don't write-protect these registers. */
	if (HAS_DDI(dev_priv))
		return;
	/*
	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
	 * everywhere where registers can be write protected.
	 */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_num = 2;
	else
		pps_num = 1;

	for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
		u32 val = I915_READ(PP_CONTROL(pps_idx));

		/* Replace whatever key is latched with the unlock value. */
		val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
		I915_WRITE(PP_CONTROL(pps_idx), val);
	}
}
14120
/*
 * Pick the MMIO base for the panel power sequencer registers on this
 * platform and apply the register-unlock workaround.
 */
static void intel_pps_init(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
		dev_priv->pps_mmio_base = PCH_PPS_BASE;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->pps_mmio_base = VLV_PPS_BASE;
	else
		dev_priv->pps_mmio_base = PPS_BASE;

	intel_pps_unlock_regs_wa(dev_priv);
}
14132
/*
 * Probe and register all display outputs (encoders/connectors) for this
 * device: LVDS/eDP, CRT, DDI, PCH ports, VLV/CHV ports, SDVO/HDMI/DP,
 * DVO and TV, depending on platform and strap/VBT detection.
 *
 * The probe order matters in places (see the LVDS-before-eDP comment
 * below), and on many platforms presence is decided from latched strap
 * bits in hardware registers combined with VBT hints.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_init(dev_priv);

	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return;

	/*
	 * intel_edp_init_connector() depends on this completing first, to
	 * prevent the registeration of both eDP and LVDS and the incorrect
	 * sharing of the PPS.
	 */
	intel_lvds_init(dev_priv);

	if (intel_crt_present(dev_priv))
		intel_crt_init(dev_priv);

	if (IS_ICELAKE(dev_priv)) {
		/* ICL: all six DDI ports are registered unconditionally. */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		intel_ddi_init(dev_priv, PORT_F);
	} else if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		vlv_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;
		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		/* Port D is HDMI only when it isn't strapped as eDP. */
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev_priv, DP_D, PORT_D);
	} else if (IS_GEN2(dev_priv))
		intel_dvo_init(dev_priv);

	if (SUPPORTS_TV(dev_priv))
		intel_tv_init(dev_priv);

	intel_psr_init(dev_priv);

	/* Now that all encoders exist, compute crtc/clone possibilities. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
14329
14330 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14331 {
14332         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14333         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14334
14335         drm_framebuffer_cleanup(fb);
14336
14337         i915_gem_object_lock(obj);
14338         WARN_ON(!obj->framebuffer_references--);
14339         i915_gem_object_unlock(obj);
14340
14341         i915_gem_object_put(obj);
14342
14343         kfree(intel_fb);
14344 }
14345
14346 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14347                                                 struct drm_file *file,
14348                                                 unsigned int *handle)
14349 {
14350         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14351
14352         if (obj->userptr.mm) {
14353                 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14354                 return -EINVAL;
14355         }
14356
14357         return drm_gem_handle_create(file, &obj->base, handle);
14358 }
14359
14360 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14361                                         struct drm_file *file,
14362                                         unsigned flags, unsigned color,
14363                                         struct drm_clip_rect *clips,
14364                                         unsigned num_clips)
14365 {
14366         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14367
14368         i915_gem_object_flush_if_display(obj);
14369         intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
14370
14371         return 0;
14372 }
14373
/* Framebuffer callbacks handed to the DRM core for i915 framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
14379
14380 static
14381 u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
14382                          uint64_t fb_modifier, uint32_t pixel_format)
14383 {
14384         u32 gen = INTEL_GEN(dev_priv);
14385
14386         if (gen >= 9) {
14387                 int cpp = drm_format_plane_cpp(pixel_format, 0);
14388
14389                 /* "The stride in bytes must not exceed the of the size of 8K
14390                  *  pixels and 32K bytes."
14391                  */
14392                 return min(8192 * cpp, 32768);
14393         } else if (gen >= 5 && !HAS_GMCH_DISPLAY(dev_priv)) {
14394                 return 32*1024;
14395         } else if (gen >= 4) {
14396                 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14397                         return 16*1024;
14398                 else
14399                         return 32*1024;
14400         } else if (gen >= 3) {
14401                 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14402                         return 8*1024;
14403                 else
14404                         return 16*1024;
14405         } else {
14406                 /* XXX DSPC is limited to 4k tiled */
14407                 return 8*1024;
14408         }
14409 }
14410
14411 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
14412                                   struct drm_i915_gem_object *obj,
14413                                   struct drm_mode_fb_cmd2 *mode_cmd)
14414 {
14415         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
14416         struct drm_framebuffer *fb = &intel_fb->base;
14417         struct drm_format_name_buf format_name;
14418         u32 pitch_limit;
14419         unsigned int tiling, stride;
14420         int ret = -EINVAL;
14421         int i;
14422
14423         i915_gem_object_lock(obj);
14424         obj->framebuffer_references++;
14425         tiling = i915_gem_object_get_tiling(obj);
14426         stride = i915_gem_object_get_stride(obj);
14427         i915_gem_object_unlock(obj);
14428
14429         if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
14430                 /*
14431                  * If there's a fence, enforce that
14432                  * the fb modifier and tiling mode match.
14433                  */
14434                 if (tiling != I915_TILING_NONE &&
14435                     tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
14436                         DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
14437                         goto err;
14438                 }
14439         } else {
14440                 if (tiling == I915_TILING_X) {
14441                         mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
14442                 } else if (tiling == I915_TILING_Y) {
14443                         DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
14444                         goto err;
14445                 }
14446         }
14447
14448         /* Passed in modifier sanity checking. */
14449         switch (mode_cmd->modifier[0]) {
14450         case I915_FORMAT_MOD_Y_TILED_CCS:
14451         case I915_FORMAT_MOD_Yf_TILED_CCS:
14452                 switch (mode_cmd->pixel_format) {
14453                 case DRM_FORMAT_XBGR8888:
14454                 case DRM_FORMAT_ABGR8888:
14455                 case DRM_FORMAT_XRGB8888:
14456                 case DRM_FORMAT_ARGB8888:
14457                         break;
14458                 default:
14459                         DRM_DEBUG_KMS("RC supported only with RGB8888 formats\n");
14460                         goto err;
14461                 }
14462                 /* fall through */
14463         case I915_FORMAT_MOD_Y_TILED:
14464         case I915_FORMAT_MOD_Yf_TILED:
14465                 if (INTEL_GEN(dev_priv) < 9) {
14466                         DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
14467                                       mode_cmd->modifier[0]);
14468                         goto err;
14469                 }
14470         case DRM_FORMAT_MOD_LINEAR:
14471         case I915_FORMAT_MOD_X_TILED:
14472                 break;
14473         default:
14474                 DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n",
14475                               mode_cmd->modifier[0]);
14476                 goto err;
14477         }
14478
14479         /*
14480          * gen2/3 display engine uses the fence if present,
14481          * so the tiling mode must match the fb modifier exactly.
14482          */
14483         if (INTEL_GEN(dev_priv) < 4 &&
14484             tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
14485                 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
14486                 goto err;
14487         }
14488
14489         pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
14490                                            mode_cmd->pixel_format);
14491         if (mode_cmd->pitches[0] > pitch_limit) {
14492                 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
14493                               mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
14494                               "tiled" : "linear",
14495                               mode_cmd->pitches[0], pitch_limit);
14496                 goto err;
14497         }
14498
14499         /*
14500          * If there's a fence, enforce that
14501          * the fb pitch and fence stride match.
14502          */
14503         if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
14504                 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
14505                               mode_cmd->pitches[0], stride);
14506                 goto err;
14507         }
14508
14509         /* Reject formats not supported by any plane early. */
14510         switch (mode_cmd->pixel_format) {
14511         case DRM_FORMAT_C8:
14512         case DRM_FORMAT_RGB565:
14513         case DRM_FORMAT_XRGB8888:
14514         case DRM_FORMAT_ARGB8888:
14515                 break;
14516         case DRM_FORMAT_XRGB1555:
14517                 if (INTEL_GEN(dev_priv) > 3) {
14518                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14519                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14520                         goto err;
14521                 }
14522                 break;
14523         case DRM_FORMAT_ABGR8888:
14524                 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
14525                     INTEL_GEN(dev_priv) < 9) {
14526                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14527                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14528                         goto err;
14529                 }
14530                 break;
14531         case DRM_FORMAT_XBGR8888:
14532         case DRM_FORMAT_XRGB2101010:
14533         case DRM_FORMAT_XBGR2101010:
14534                 if (INTEL_GEN(dev_priv) < 4) {
14535                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14536                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14537                         goto err;
14538                 }
14539                 break;
14540         case DRM_FORMAT_ABGR2101010:
14541                 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
14542                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14543                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14544                         goto err;
14545                 }
14546                 break;
14547         case DRM_FORMAT_YUYV:
14548         case DRM_FORMAT_UYVY:
14549         case DRM_FORMAT_YVYU:
14550         case DRM_FORMAT_VYUY:
14551                 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
14552                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14553                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14554                         goto err;
14555                 }
14556                 break;
14557         case DRM_FORMAT_NV12:
14558                 if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
14559                     IS_BROXTON(dev_priv) || INTEL_GEN(dev_priv) >= 11) {
14560                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14561                                       drm_get_format_name(mode_cmd->pixel_format,
14562                                                           &format_name));
14563                         goto err;
14564                 }
14565                 break;
14566         default:
14567                 DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14568                               drm_get_format_name(mode_cmd->pixel_format, &format_name));
14569                 goto err;
14570         }
14571
14572         /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
14573         if (mode_cmd->offsets[0] != 0)
14574                 goto err;
14575
14576         drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
14577
14578         if (fb->format->format == DRM_FORMAT_NV12 &&
14579             (fb->width < SKL_MIN_YUV_420_SRC_W ||
14580              fb->height < SKL_MIN_YUV_420_SRC_H ||
14581              (fb->width % 4) != 0 || (fb->height % 4) != 0)) {
14582                 DRM_DEBUG_KMS("src dimensions not correct for NV12\n");
14583                 return -EINVAL;
14584         }
14585
14586         for (i = 0; i < fb->format->num_planes; i++) {
14587                 u32 stride_alignment;
14588
14589                 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
14590                         DRM_DEBUG_KMS("bad plane %d handle\n", i);
14591                         goto err;
14592                 }
14593
14594                 stride_alignment = intel_fb_stride_alignment(fb, i);
14595
14596                 /*
14597                  * Display WA #0531: skl,bxt,kbl,glk
14598                  *
14599                  * Render decompression and plane width > 3840
14600                  * combined with horizontal panning requires the
14601                  * plane stride to be a multiple of 4. We'll just
14602                  * require the entire fb to accommodate that to avoid
14603                  * potential runtime errors at plane configuration time.
14604                  */
14605                 if (IS_GEN9(dev_priv) && i == 0 && fb->width > 3840 &&
14606                     is_ccs_modifier(fb->modifier))
14607                         stride_alignment *= 4;
14608
14609                 if (fb->pitches[i] & (stride_alignment - 1)) {
14610                         DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
14611                                       i, fb->pitches[i], stride_alignment);
14612                         goto err;
14613                 }
14614
14615                 fb->obj[i] = &obj->base;
14616         }
14617
14618         ret = intel_fill_fb_info(dev_priv, fb);
14619         if (ret)
14620                 goto err;
14621
14622         ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
14623         if (ret) {
14624                 DRM_ERROR("framebuffer init failed %d\n", ret);
14625                 goto err;
14626         }
14627
14628         return 0;
14629
14630 err:
14631         i915_gem_object_lock(obj);
14632         obj->framebuffer_references--;
14633         i915_gem_object_unlock(obj);
14634         return ret;
14635 }
14636
14637 static struct drm_framebuffer *
14638 intel_user_framebuffer_create(struct drm_device *dev,
14639                               struct drm_file *filp,
14640                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
14641 {
14642         struct drm_framebuffer *fb;
14643         struct drm_i915_gem_object *obj;
14644         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
14645
14646         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
14647         if (!obj)
14648                 return ERR_PTR(-ENOENT);
14649
14650         fb = intel_framebuffer_create(obj, &mode_cmd);
14651         if (IS_ERR(fb))
14652                 i915_gem_object_put(obj);
14653
14654         return fb;
14655 }
14656
14657 static void intel_atomic_state_free(struct drm_atomic_state *state)
14658 {
14659         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
14660
14661         drm_atomic_state_default_release(state);
14662
14663         i915_sw_fence_fini(&intel_state->commit_ready);
14664
14665         kfree(state);
14666 }
14667
14668 static enum drm_mode_status
14669 intel_mode_valid(struct drm_device *dev,
14670                  const struct drm_display_mode *mode)
14671 {
14672         struct drm_i915_private *dev_priv = to_i915(dev);
14673         int hdisplay_max, htotal_max;
14674         int vdisplay_max, vtotal_max;
14675
14676         /*
14677          * Can't reject DBLSCAN here because Xorg ddxen can add piles
14678          * of DBLSCAN modes to the output's mode list when they detect
14679          * the scaling mode property on the connector. And they don't
14680          * ask the kernel to validate those modes in any way until
14681          * modeset time at which point the client gets a protocol error.
14682          * So in order to not upset those clients we silently ignore the
14683          * DBLSCAN flag on such connectors. For other connectors we will
14684          * reject modes with the DBLSCAN flag in encoder->compute_config().
14685          * And we always reject DBLSCAN modes in connector->mode_valid()
14686          * as we never want such modes on the connector's mode list.
14687          */
14688
14689         if (mode->vscan > 1)
14690                 return MODE_NO_VSCAN;
14691
14692         if (mode->flags & DRM_MODE_FLAG_HSKEW)
14693                 return MODE_H_ILLEGAL;
14694
14695         if (mode->flags & (DRM_MODE_FLAG_CSYNC |
14696                            DRM_MODE_FLAG_NCSYNC |
14697                            DRM_MODE_FLAG_PCSYNC))
14698                 return MODE_HSYNC;
14699
14700         if (mode->flags & (DRM_MODE_FLAG_BCAST |
14701                            DRM_MODE_FLAG_PIXMUX |
14702                            DRM_MODE_FLAG_CLKDIV2))
14703                 return MODE_BAD;
14704
14705         if (INTEL_GEN(dev_priv) >= 9 ||
14706             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
14707                 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
14708                 vdisplay_max = 4096;
14709                 htotal_max = 8192;
14710                 vtotal_max = 8192;
14711         } else if (INTEL_GEN(dev_priv) >= 3) {
14712                 hdisplay_max = 4096;
14713                 vdisplay_max = 4096;
14714                 htotal_max = 8192;
14715                 vtotal_max = 8192;
14716         } else {
14717                 hdisplay_max = 2048;
14718                 vdisplay_max = 2048;
14719                 htotal_max = 4096;
14720                 vtotal_max = 4096;
14721         }
14722
14723         if (mode->hdisplay > hdisplay_max ||
14724             mode->hsync_start > htotal_max ||
14725             mode->hsync_end > htotal_max ||
14726             mode->htotal > htotal_max)
14727                 return MODE_H_ILLEGAL;
14728
14729         if (mode->vdisplay > vdisplay_max ||
14730             mode->vsync_start > vtotal_max ||
14731             mode->vsync_end > vtotal_max ||
14732             mode->vtotal > vtotal_max)
14733                 return MODE_V_ILLEGAL;
14734
14735         return MODE_OK;
14736 }
14737
/* Mode config callbacks handed to the DRM core for the whole device. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
14749
/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 *
 * Selects the per-platform implementations of the pipe-config readout,
 * initial plane-config readout, clock computation and CRTC enable/disable
 * vfuncs, plus the FDI link-training and CRTC update helpers.
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	intel_init_cdclk_hooks(dev_priv);

	/* gen9+ (Skylake and later) */
	if (INTEL_GEN(dev_priv) >= 9) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		/* pre-gen9 DDI platforms (Haswell/Broadwell) */
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* Ironlake/Sandybridge/Ivybridge style PCH platforms */
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN2(dev_priv)) {
		/* remaining gen3/gen4 GMCH platforms */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		/* gen2 */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	/* FDI link training only exists on PCH-split platforms. */
	if (IS_GEN5(dev_priv)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		dev_priv->display.update_crtcs = skl_update_crtcs;
	else
		dev_priv->display.update_crtcs = intel_update_crtcs;
}
14842
14843 /*
14844  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
14845  */
14846 static void quirk_ssc_force_disable(struct drm_device *dev)
14847 {
14848         struct drm_i915_private *dev_priv = to_i915(dev);
14849         dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
14850         DRM_INFO("applying lvds SSC disable quirk\n");
14851 }
14852
14853 /*
14854  * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
14855  * brightness value
14856  */
14857 static void quirk_invert_brightness(struct drm_device *dev)
14858 {
14859         struct drm_i915_private *dev_priv = to_i915(dev);
14860         dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
14861         DRM_INFO("applying inverted panel brightness quirk\n");
14862 }
14863
14864 /* Some VBT's incorrectly indicate no backlight is present */
14865 static void quirk_backlight_present(struct drm_device *dev)
14866 {
14867         struct drm_i915_private *dev_priv = to_i915(dev);
14868         dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
14869         DRM_INFO("applying backlight present quirk\n");
14870 }
14871
14872 /* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms
14873  * which is 300 ms greater than eDP spec T12 min.
14874  */
14875 static void quirk_increase_t12_delay(struct drm_device *dev)
14876 {
14877         struct drm_i915_private *dev_priv = to_i915(dev);
14878
14879         dev_priv->quirks |= QUIRK_INCREASE_T12_DELAY;
14880         DRM_INFO("Applying T12 delay quirk\n");
14881 }
14882
14883 /*
14884  * GeminiLake NUC HDMI outputs require additional off time
14885  * this allows the onboard retimer to correctly sync to signal
14886  */
14887 static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
14888 {
14889         struct drm_i915_private *dev_priv = to_i915(dev);
14890
14891         dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
14892         DRM_INFO("Applying Increase DDI Disabled quirk\n");
14893 }
14894
/*
 * PCI-ID keyed display quirk. Matched against the device's PCI IDs in
 * intel_init_quirks(); PCI_ANY_ID in a subsystem field acts as a wildcard.
 */
struct intel_quirk {
	int device;		/* PCI device ID */
	int subsystem_vendor;	/* PCI subsystem vendor ID (or PCI_ANY_ID) */
	int subsystem_device;	/* PCI subsystem device ID (or PCI_ANY_ID) */
	void (*hook)(struct drm_device *dev);	/* quirk applied on match */
};
14901
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);		/* quirk applied on match */
	const struct dmi_system_id (*dmi_id_list)[];	/* DMI match table */
};
14907
/* DMI match callback: log which system gets reversed backlight polarity. */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}
14913
/*
 * Quirks keyed on DMI system identification strings, for machines whose
 * PCI subsystem IDs are not meaningful.
 */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
14929
/*
 * Quirks keyed on PCI device / subsystem IDs; see struct intel_quirk.
 * Matched against the device by intel_init_quirks().
 */
static struct intel_quirk intel_quirks[] = {
	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },

	/* Toshiba Satellite P50-C-18C */
	{ 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },

	/* GeminiLake NUC */
	{ 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
	{ 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
	/* ASRock ITX*/
	{ 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
	{ 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
};
14989
14990 static void intel_init_quirks(struct drm_device *dev)
14991 {
14992         struct pci_dev *d = dev->pdev;
14993         int i;
14994
14995         for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
14996                 struct intel_quirk *q = &intel_quirks[i];
14997
14998                 if (d->device == q->device &&
14999                     (d->subsystem_vendor == q->subsystem_vendor ||
15000                      q->subsystem_vendor == PCI_ANY_ID) &&
15001                     (d->subsystem_device == q->subsystem_device ||
15002                      q->subsystem_device == PCI_ANY_ID))
15003                         q->hook(dev);
15004         }
15005         for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
15006                 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
15007                         intel_dmi_quirks[i].hook(dev);
15008         }
15009 }
15010
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	/* Read-modify-write VGA SR01 via the legacy index/data port pair. */
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	/* Set bit 5 of SR01 before disabling the plane via vga_reg below. */
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	/* Posting read ensures the write reaches the hardware. */
	POSTING_READ(vga_reg);
}
15029
/* Sync the software CDCLK bookkeeping with the current hardware state. */
void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
	/* Start with logical == actual == the state just read from hardware. */
	dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
}
15038
15039 /*
15040  * Calculate what we think the watermarks should be for the state we've read
15041  * out of the hardware and then immediately program those watermarks so that
15042  * we ensure the hardware settings match our internal state.
15043  *
15044  * We can calculate what we think WM's should be by creating a duplicate of the
15045  * current state (which was constructed during hardware readout) and running it
15046  * through the atomic check code to calculate new watermark values in the
15047  * state object.
15048  */
static void sanitize_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	/*
	 * We need to hold connection_mutex before calling duplicate_state so
	 * that the connector loop is protected.
	 */
	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		/* Standard w/w mutex deadlock protocol: back off and retry. */
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto fail;
	}

	/* Duplicate the current (hardware-readout) state as a working copy. */
	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (WARN_ON(IS_ERR(state)))
		goto fail;

	intel_state = to_intel_atomic_state(state);

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH_DISPLAY(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = intel_atomic_check(dev, state);
	if (ret) {
		/*
		 * If we fail here, it means that the hardware appears to be
		 * programmed in a way that shouldn't be possible, given our
		 * understanding of watermark requirements.  This might mean a
		 * mistake in the hardware readout code or a mistake in the
		 * watermark calculations for a given platform.  Raise a WARN
		 * so that this is noticeable.
		 *
		 * If this actually happens, we'll have to just leave the
		 * BIOS-programmed watermarks untouched and hope for the best.
		 */
		WARN(true, "Could not determine valid watermarks for inherited state\n");
		goto put_state;
	}

	/* Write calculated watermark values back */
	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);

		/*
		 * Presumably forces optimize_watermarks to program the
		 * post-vblank (optimal) values -- TODO confirm against
		 * the platform wm code.
		 */
		cs->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, cs);

		/* Mirror the computed wm state into the committed state. */
		to_intel_crtc_state(crtc->state)->wm = cs->wm;
	}

put_state:
	drm_atomic_state_put(state);
fail:
	/* Locks are dropped on the early WARN_ON(ret) error path too. */
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
15125
15126 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
15127 {
15128         if (IS_GEN5(dev_priv)) {
15129                 u32 fdi_pll_clk =
15130                         I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
15131
15132                 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
15133         } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
15134                 dev_priv->fdi_pll_freq = 270000;
15135         } else {
15136                 return;
15137         }
15138
15139         DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
15140 }
15141
/*
 * Commit the current (probed/readout) state of all CRTCs once at init
 * time so that all derived plane/CRTC state gets computed before the
 * first userspace modeset (see the comment at the call site).
 *
 * Returns 0 on success or a negative error code; -EDEADLK is handled
 * internally via the modeset backoff/retry protocol.
 */
static int intel_initial_commit(struct drm_device *dev)
{
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	drm_for_each_crtc(crtc, dev) {
		/* Pulling the crtc state into @state also locks the crtc. */
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto out;
		}

		if (crtc_state->active) {
			ret = drm_atomic_add_affected_planes(state, crtc);
			if (ret)
				goto out;
		}
	}

	ret = drm_atomic_commit(state);

out:
	if (ret == -EDEADLK) {
		/* Reset the state before retrying with fresh locks. */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
15189
15190 int intel_modeset_init(struct drm_device *dev)
15191 {
15192         struct drm_i915_private *dev_priv = to_i915(dev);
15193         struct i915_ggtt *ggtt = &dev_priv->ggtt;
15194         enum pipe pipe;
15195         struct intel_crtc *crtc;
15196         int ret;
15197
15198         dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
15199
15200         drm_mode_config_init(dev);
15201
15202         dev->mode_config.min_width = 0;
15203         dev->mode_config.min_height = 0;
15204
15205         dev->mode_config.preferred_depth = 24;
15206         dev->mode_config.prefer_shadow = 1;
15207
15208         dev->mode_config.allow_fb_modifiers = true;
15209
15210         dev->mode_config.funcs = &intel_mode_funcs;
15211
15212         init_llist_head(&dev_priv->atomic_helper.free_list);
15213         INIT_WORK(&dev_priv->atomic_helper.free_work,
15214                   intel_atomic_helper_free_state_worker);
15215
15216         intel_init_quirks(dev);
15217
15218         intel_init_pm(dev_priv);
15219
15220         /*
15221          * There may be no VBT; and if the BIOS enabled SSC we can
15222          * just keep using it to avoid unnecessary flicker.  Whereas if the
15223          * BIOS isn't using it, don't assume it will work even if the VBT
15224          * indicates as much.
15225          */
15226         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
15227                 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
15228                                             DREF_SSC1_ENABLE);
15229
15230                 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
15231                         DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
15232                                      bios_lvds_use_ssc ? "en" : "dis",
15233                                      dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
15234                         dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
15235                 }
15236         }
15237
15238         /* maximum framebuffer dimensions */
15239         if (IS_GEN2(dev_priv)) {
15240                 dev->mode_config.max_width = 2048;
15241                 dev->mode_config.max_height = 2048;
15242         } else if (IS_GEN3(dev_priv)) {
15243                 dev->mode_config.max_width = 4096;
15244                 dev->mode_config.max_height = 4096;
15245         } else {
15246                 dev->mode_config.max_width = 8192;
15247                 dev->mode_config.max_height = 8192;
15248         }
15249
15250         if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
15251                 dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
15252                 dev->mode_config.cursor_height = 1023;
15253         } else if (IS_GEN2(dev_priv)) {
15254                 dev->mode_config.cursor_width = 64;
15255                 dev->mode_config.cursor_height = 64;
15256         } else {
15257                 dev->mode_config.cursor_width = 256;
15258                 dev->mode_config.cursor_height = 256;
15259         }
15260
15261         dev->mode_config.fb_base = ggtt->gmadr.start;
15262
15263         DRM_DEBUG_KMS("%d display pipe%s available.\n",
15264                       INTEL_INFO(dev_priv)->num_pipes,
15265                       INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
15266
15267         for_each_pipe(dev_priv, pipe) {
15268                 ret = intel_crtc_init(dev_priv, pipe);
15269                 if (ret) {
15270                         drm_mode_config_cleanup(dev);
15271                         return ret;
15272                 }
15273         }
15274
15275         intel_shared_dpll_init(dev);
15276         intel_update_fdi_pll_freq(dev_priv);
15277
15278         intel_update_czclk(dev_priv);
15279         intel_modeset_init_hw(dev);
15280
15281         if (dev_priv->max_cdclk_freq == 0)
15282                 intel_update_max_cdclk(dev_priv);
15283
15284         /* Just disable it once at startup */
15285         i915_disable_vga(dev_priv);
15286         intel_setup_outputs(dev_priv);
15287
15288         drm_modeset_lock_all(dev);
15289         intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
15290         drm_modeset_unlock_all(dev);
15291
15292         for_each_intel_crtc(dev, crtc) {
15293                 struct intel_initial_plane_config plane_config = {};
15294
15295                 if (!crtc->active)
15296                         continue;
15297
15298                 /*
15299                  * Note that reserving the BIOS fb up front prevents us
15300                  * from stuffing other stolen allocations like the ring
15301                  * on top.  This prevents some ugliness at boot time, and
15302                  * can even allow for smooth boot transitions if the BIOS
15303                  * fb is large enough for the active pipe configuration.
15304                  */
15305                 dev_priv->display.get_initial_plane_config(crtc,
15306                                                            &plane_config);
15307
15308                 /*
15309                  * If the fb is shared between multiple heads, we'll
15310                  * just get the first one.
15311                  */
15312                 intel_find_initial_plane_obj(crtc, &plane_config);
15313         }
15314
15315         /*
15316          * Make sure hardware watermarks really match the state we read out.
15317          * Note that we need to do this after reconstructing the BIOS fb's
15318          * since the watermark calculation done here will use pstate->fb.
15319          */
15320         if (!HAS_GMCH_DISPLAY(dev_priv))
15321                 sanitize_watermarks(dev);
15322
15323         /*
15324          * Force all active planes to recompute their states. So that on
15325          * mode_setcrtc after probe, all the intel_plane_state variables
15326          * are already calculated and there is no assert_plane warnings
15327          * during bootup.
15328          */
15329         ret = intel_initial_commit(dev);
15330         if (ret)
15331                 DRM_DEBUG_KMS("Initial commit in probe failed.\n");
15332
15333         return 0;
15334 }
15335
/*
 * Force-enable pipe @pipe with a fixed 640x480@60Hz mode (~25175 kHz
 * dot clock from the 48 MHz reference), bypassing the atomic machinery.
 * Used for hardware that needs the pipe running ("force quirk", see the
 * debug message below).
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* The chosen dividers yield 25154 kHz, close enough to 25175. */
	WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

	DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		      pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	/* Preserve only the DVO 2x bit from the current DPLL value. */
	dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	I915_WRITE(FP0(pipe), fp);
	I915_WRITE(FP1(pipe), fp);

	/* Standard 640x480@60 timings (values are "size - 1"). */
	I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		I915_WRITE(DPLL(pipe), dpll);
		POSTING_READ(DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	POSTING_READ(PIPECONF(pipe));

	/* Make sure the pipe actually started scanning out. */
	intel_wait_for_pipe_scanline_moving(crtc);
}
15405
/*
 * Counterpart of i830_enable_pipe(): force-disable pipe @pipe. All
 * display planes and cursors are expected to be off already (the
 * WARN_ONs below check this), then the pipe is disabled and finally
 * its DPLL is shut down with VGA mode kept disabled.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
		      pipe_name(pipe));

	WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
	WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);

	I915_WRITE(PIPECONF(pipe), 0);
	POSTING_READ(PIPECONF(pipe));

	/* Keep the DPLL running until the pipe has stopped scanning out. */
	intel_wait_for_pipe_scanline_stopped(crtc);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
15427
15428 static bool intel_plane_mapping_ok(struct intel_crtc *crtc,
15429                                    struct intel_plane *plane)
15430 {
15431         enum pipe pipe;
15432
15433         if (!plane->get_hw_state(plane, &pipe))
15434                 return true;
15435
15436         return pipe == crtc->pipe;
15437 }
15438
15439 static void
15440 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
15441 {
15442         struct intel_crtc *crtc;
15443
15444         if (INTEL_GEN(dev_priv) >= 4)
15445                 return;
15446
15447         for_each_intel_crtc(&dev_priv->drm, crtc) {
15448                 struct intel_plane *plane =
15449                         to_intel_plane(crtc->base.primary);
15450
15451                 if (intel_plane_mapping_ok(crtc, plane))
15452                         continue;
15453
15454                 DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n",
15455                               plane->base.name);
15456                 intel_plane_disable_noatomic(crtc, plane);
15457         }
15458 }
15459
15460 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15461 {
15462         struct drm_device *dev = crtc->base.dev;
15463         struct intel_encoder *encoder;
15464
15465         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15466                 return true;
15467
15468         return false;
15469 }
15470
15471 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
15472 {
15473         struct drm_device *dev = encoder->base.dev;
15474         struct intel_connector *connector;
15475
15476         for_each_connector_on_encoder(dev, &encoder->base, connector)
15477                 return connector;
15478
15479         return NULL;
15480 }
15481
15482 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
15483                               enum pipe pch_transcoder)
15484 {
15485         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
15486                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
15487 }
15488
/*
 * Sanitize one CRTC after hardware state readout: clear BIOS frame
 * start delays, re-sync vblank bookkeeping, turn off everything but the
 * primary plane on active CRTCs, disable CRTCs that have no encoders,
 * and mark FIFO underrun reporting as disabled where needed.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			if (plane_state->base.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc->active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base, ctx);

	if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
15555
/*
 * Sanitize one encoder after hardware state readout: if a connector is
 * attached but the encoder has no active pipe (typical fallout from
 * resume register restore), manually disable the encoder and clamp the
 * connector state to off. Finally notify the opregion of the result.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			struct drm_crtc_state *crtc_state = encoder->base.crtc->state;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
}
15599
15600 void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
15601 {
15602         i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
15603
15604         if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15605                 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15606                 i915_disable_vga(dev_priv);
15607         }
15608 }
15609
/*
 * Re-disable the VGA plane without waking the VGA power well: if the
 * well is already off, VGA can't be scanning out and there is nothing
 * to do.
 */
void i915_redisable_vga(struct drm_i915_private *dev_priv)
{
	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev_priv);

	/* Drop the reference taken by the get_if_enabled above. */
	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
15626
15627 /* FIXME read out full plane state for all planes */
15628 static void readout_plane_state(struct intel_crtc *crtc)
15629 {
15630         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
15631         struct intel_crtc_state *crtc_state =
15632                 to_intel_crtc_state(crtc->base.state);
15633         struct intel_plane *plane;
15634
15635         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
15636                 struct intel_plane_state *plane_state =
15637                         to_intel_plane_state(plane->base.state);
15638                 enum pipe pipe;
15639                 bool visible;
15640
15641                 visible = plane->get_hw_state(plane, &pipe);
15642
15643                 intel_set_plane_visible(crtc_state, plane_state, visible);
15644         }
15645 }
15646
/*
 * Reconstruct the software modeset state from the current hardware
 * state: CRTCs first, then shared DPLLs, encoders, connectors, and
 * finally the derived per-CRTC state (modes, pixel rate, cdclk and
 * voltage bookkeeping).
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int i;

	dev_priv->active_crtcs = 0;

	/* CRTC readout: rebuild each crtc state from scratch. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Wipe the old state entirely before re-populating it. */
		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
		memset(crtc_state, 0, sizeof(*crtc_state));
		crtc_state->base.crtc = &crtc->base;

		crtc_state->base.active = crtc_state->base.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->base.enable;
		crtc->active = crtc_state->base.active;

		if (crtc_state->base.active)
			dev_priv->active_crtcs |= 1 << crtc->pipe;

		readout_plane_state(crtc);

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      enableddisabled(crtc_state->base.active));
	}

	/* Shared DPLL readout: recompute which active CRTCs use each PLL. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
							&pll->state.hw_state);
		pll->state.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			if (crtc_state->base.active &&
			    crtc_state->shared_dpll == pll)
				pll->state.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->state.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->info->name, pll->state.crtc_mask, pll->on);
	}

	/* Encoder readout: link each active encoder to its pipe's crtc. */
	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id, encoder->base.name,
			      enableddisabled(encoder->base.crtc),
			      pipe_name(pipe));
	}

	/* Connector readout: set dpms and rebuild crtc connector/encoder masks. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			if (encoder->base.crtc &&
			    encoder->base.crtc->state->active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				encoder->base.crtc->state->connector_mask |=
					drm_connector_mask(&connector->base);
				encoder->base.crtc->state->encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}

		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id, connector->base.name,
			      enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Derived state: modes, pixel rate, cdclk/voltage bookkeeping. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		int min_cdclk = 0;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc_state->base.active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
			crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
			crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
			intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			if (dev_priv->display.modeset_calc_cdclk) {
				min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
				if (WARN_ON(min_cdclk < 0))
					min_cdclk = 0;
			}

			drm_calc_timestamping_constants(&crtc->base,
							&crtc_state->base.adjusted_mode);
			update_scanline_offset(crtc);
		}

		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
		dev_priv->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
15799
15800 static void
15801 get_encoder_power_domains(struct drm_i915_private *dev_priv)
15802 {
15803         struct intel_encoder *encoder;
15804
15805         for_each_intel_encoder(&dev_priv->drm, encoder) {
15806                 u64 get_domains;
15807                 enum intel_display_power_domain domain;
15808                 struct intel_crtc_state *crtc_state;
15809
15810                 if (!encoder->get_power_domains)
15811                         continue;
15812
15813                 /*
15814                  * MST-primary and inactive encoders don't have a crtc state
15815                  * and neither of these require any power domain references.
15816                  */
15817                 if (!encoder->base.crtc)
15818                         continue;
15819
15820                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
15821                 get_domains = encoder->get_power_domains(encoder, crtc_state);
15822                 for_each_power_domain(domain, get_domains)
15823                         intel_display_power_get(dev_priv, domain);
15824         }
15825 }
15826
15827 static void intel_early_display_was(struct drm_i915_private *dev_priv)
15828 {
15829         /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
15830         if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
15831                 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
15832                            DARBF_GATING_DIS);
15833
15834         if (IS_HASWELL(dev_priv)) {
15835                 /*
15836                  * WaRsPkgCStateDisplayPMReq:hsw
15837                  * System hang if this isn't done before disabling all planes!
15838                  */
15839                 I915_WRITE(CHICKEN_PAR1_1,
15840                            I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
15841         }
15842 }
15843
/*
 * Scan out the current hw modeset state and sanitize it to the current
 * state: read everything back from the hardware, then fix up anything the
 * driver cannot (or should not) keep running as-is. Runs with the INIT
 * power domain held so the hardware stays accessible throughout.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	/* Apply early workarounds before touching planes/pipes. */
	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	get_encoder_power_domains(dev_priv);

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(dev_priv, pipe) {
		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Turn off any shared DPLL the BIOS left running with no users. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
			      pll->info->name);

		pll->info->funcs->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read out (and where supported, sanitize) watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev);
	}

	/*
	 * Acquire the crtc power domains matching the readout; a non-zero
	 * put mask here means the readout and the domain bookkeeping
	 * disagree, hence the WARN.
	 */
	for_each_intel_crtc(dev, crtc) {
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	intel_fbc_init_pipe_state(dev_priv);
}
15918
15919 void intel_display_resume(struct drm_device *dev)
15920 {
15921         struct drm_i915_private *dev_priv = to_i915(dev);
15922         struct drm_atomic_state *state = dev_priv->modeset_restore_state;
15923         struct drm_modeset_acquire_ctx ctx;
15924         int ret;
15925
15926         dev_priv->modeset_restore_state = NULL;
15927         if (state)
15928                 state->acquire_ctx = &ctx;
15929
15930         drm_modeset_acquire_init(&ctx, 0);
15931
15932         while (1) {
15933                 ret = drm_modeset_lock_all_ctx(dev, &ctx);
15934                 if (ret != -EDEADLK)
15935                         break;
15936
15937                 drm_modeset_backoff(&ctx);
15938         }
15939
15940         if (!ret)
15941                 ret = __intel_display_resume(dev, state, &ctx);
15942
15943         intel_enable_ipc(dev_priv);
15944         drm_modeset_drop_locks(&ctx);
15945         drm_modeset_acquire_fini(&ctx);
15946
15947         if (ret)
15948                 DRM_ERROR("Restoring old state failed with %i\n", ret);
15949         if (state)
15950                 drm_atomic_state_put(state);
15951 }
15952
/*
 * Late connector registration hook: register the backlight sysfs device
 * (if this connector has one).
 *
 * Returns 0 on success or the negative error code from the backlight
 * registration. The previous goto-based error ladder was dead weight —
 * there is only one acquisition here and nothing to unwind, so the
 * result can be returned directly.
 */
int intel_connector_register(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	return intel_backlight_device_register(intel_connector);
}
15967
/*
 * Early connector unregistration hook: tear down the backlight sysfs
 * device first, then the backlight state itself.
 */
void intel_connector_unregister(struct drm_connector *connector)
{
	intel_backlight_device_unregister(to_intel_connector(connector));
	intel_panel_destroy_backlight(connector);
}
15975
15976 static void intel_hpd_poll_fini(struct drm_device *dev)
15977 {
15978         struct intel_connector *connector;
15979         struct drm_connector_list_iter conn_iter;
15980
15981         /* Kill all the work that may have been queued by hpd. */
15982         drm_connector_list_iter_begin(dev, &conn_iter);
15983         for_each_intel_connector_iter(connector, &conn_iter) {
15984                 if (connector->modeset_retry_work.func)
15985                         cancel_work_sync(&connector->modeset_retry_work);
15986                 if (connector->hdcp_shim) {
15987                         cancel_delayed_work_sync(&connector->hdcp_check_work);
15988                         cancel_work_sync(&connector->hdcp_prop_work);
15989                 }
15990         }
15991         drm_connector_list_iter_end(&conn_iter);
15992 }
15993
/*
 * Tear down modeset state. The ordering here is critical — see the
 * individual comments; in particular interrupts/polling must go first
 * and fbdev must outlive the poll workers that may call into it.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Drain any nonblocking modeset work still queued. */
	flush_workqueue(dev_priv->modeset_wq);

	/* All deferred atomic-state frees must have run by now. */
	flush_work(&dev_priv->atomic_helper.free_work);
	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(dev);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(dev_priv);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev_priv);

	intel_teardown_gmbus(dev_priv);

	/* Nothing can be queued on modeset_wq past this point. */
	destroy_workqueue(dev_priv->modeset_wq);
}
16034
/*
 * Link a connector to its encoder, both at the intel level (the
 * connector->encoder back-pointer) and at the drm core level.
 */
void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_connector_attach_encoder(&connector->base, &encoder->base);
}
16041
16042 /*
16043  * set vga decode state - true == enable VGA decode
16044  */
16045 int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
16046 {
16047         unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
16048         u16 gmch_ctrl;
16049
16050         if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16051                 DRM_ERROR("failed to read control word\n");
16052                 return -EIO;
16053         }
16054
16055         if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16056                 return 0;
16057
16058         if (state)
16059                 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16060         else
16061                 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16062
16063         if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16064                 DRM_ERROR("failed to write control word\n");
16065                 return -EIO;
16066         }
16067
16068         return 0;
16069 }
16070
16071 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
16072
/*
 * Snapshot of key display registers taken at GPU-error time, filled in
 * by intel_display_capture_error_state() and dumped by
 * intel_display_print_error_state() below.
 */
struct intel_display_error_state {

	u32 power_well_driver;		/* HSW_PWR_WELL_CTL2 (hsw/bdw only) */

	int num_transcoders;		/* valid entries in transcoder[] */

	struct intel_cursor_error_state {
		u32 control;		/* CURCNTR */
		u32 position;		/* CURPOS */
		u32 base;		/* CURBASE */
		u32 size;		/* not filled in by the capture below */
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;	/* pipe power domain was up at capture */
		u32 source;		/* PIPESRC */
		u32 stat;		/* PIPESTAT (GMCH platforms only) */
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;		/* DSPCNTR */
		u32 stride;		/* DSPSTRIDE */
		u32 size;		/* DSPSIZE (gen <= 3) */
		u32 pos;		/* DSPPOS (gen <= 3) */
		u32 addr;		/* DSPADDR (gen <= 7, not hsw) */
		u32 surface;		/* DSPSURF (gen >= 4) */
		u32 tile_offset;	/* DSPTILEOFF (gen >= 4) */
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;	/* transcoder power domain was up */
		enum transcoder cpu_transcoder;

		u32 conf;		/* PIPECONF */

		/* Transcoder timing registers */
		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];		/* pipes A-C plus the eDP transcoder */
};
16116
16117 struct intel_display_error_state *
16118 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
16119 {
16120         struct intel_display_error_state *error;
16121         int transcoders[] = {
16122                 TRANSCODER_A,
16123                 TRANSCODER_B,
16124                 TRANSCODER_C,
16125                 TRANSCODER_EDP,
16126         };
16127         int i;
16128
16129         if (INTEL_INFO(dev_priv)->num_pipes == 0)
16130                 return NULL;
16131
16132         error = kzalloc(sizeof(*error), GFP_ATOMIC);
16133         if (error == NULL)
16134                 return NULL;
16135
16136         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16137                 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
16138
16139         for_each_pipe(dev_priv, i) {
16140                 error->pipe[i].power_domain_on =
16141                         __intel_display_power_is_enabled(dev_priv,
16142                                                          POWER_DOMAIN_PIPE(i));
16143                 if (!error->pipe[i].power_domain_on)
16144                         continue;
16145
16146                 error->cursor[i].control = I915_READ(CURCNTR(i));
16147                 error->cursor[i].position = I915_READ(CURPOS(i));
16148                 error->cursor[i].base = I915_READ(CURBASE(i));
16149
16150                 error->plane[i].control = I915_READ(DSPCNTR(i));
16151                 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16152                 if (INTEL_GEN(dev_priv) <= 3) {
16153                         error->plane[i].size = I915_READ(DSPSIZE(i));
16154                         error->plane[i].pos = I915_READ(DSPPOS(i));
16155                 }
16156                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16157                         error->plane[i].addr = I915_READ(DSPADDR(i));
16158                 if (INTEL_GEN(dev_priv) >= 4) {
16159                         error->plane[i].surface = I915_READ(DSPSURF(i));
16160                         error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16161                 }
16162
16163                 error->pipe[i].source = I915_READ(PIPESRC(i));
16164
16165                 if (HAS_GMCH_DISPLAY(dev_priv))
16166                         error->pipe[i].stat = I915_READ(PIPESTAT(i));
16167         }
16168
16169         /* Note: this does not include DSI transcoders. */
16170         error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
16171         if (HAS_DDI(dev_priv))
16172                 error->num_transcoders++; /* Account for eDP. */
16173
16174         for (i = 0; i < error->num_transcoders; i++) {
16175                 enum transcoder cpu_transcoder = transcoders[i];
16176
16177                 error->transcoder[i].power_domain_on =
16178                         __intel_display_power_is_enabled(dev_priv,
16179                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
16180                 if (!error->transcoder[i].power_domain_on)
16181                         continue;
16182
16183                 error->transcoder[i].cpu_transcoder = cpu_transcoder;
16184
16185                 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16186                 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16187                 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16188                 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16189                 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16190                 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16191                 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
16192         }
16193
16194         return error;
16195 }
16196
16197 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16198
/*
 * Dump a previously captured display error state snapshot into the
 * error state buffer. Tolerates a NULL snapshot (capture may have
 * failed or been skipped).
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	/* Per-pipe state: the platform checks mirror the capture side. */
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	/* Transcoder state, including the eDP transcoder on DDI platforms. */
	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
16254
16255 #endif