87df5f937f9478b4c80ace9538c4f2e6bfa3b9ff
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / display / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/dma-resv.h>
33 #include <linux/slab.h>
34
35 #include <drm/drm_atomic.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_atomic_uapi.h>
38 #include <drm/drm_dp_helper.h>
39 #include <drm/drm_edid.h>
40 #include <drm/drm_fourcc.h>
41 #include <drm/drm_plane_helper.h>
42 #include <drm/drm_probe_helper.h>
43 #include <drm/drm_rect.h>
44 #include <drm/i915_drm.h>
45
46 #include "display/intel_crt.h"
47 #include "display/intel_ddi.h"
48 #include "display/intel_dp.h"
49 #include "display/intel_dp_mst.h"
50 #include "display/intel_dsi.h"
51 #include "display/intel_dvo.h"
52 #include "display/intel_gmbus.h"
53 #include "display/intel_hdmi.h"
54 #include "display/intel_lvds.h"
55 #include "display/intel_sdvo.h"
56 #include "display/intel_tv.h"
57 #include "display/intel_vdsc.h"
58
59 #include "gt/intel_rps.h"
60
61 #include "i915_drv.h"
62 #include "i915_trace.h"
63 #include "intel_acpi.h"
64 #include "intel_atomic.h"
65 #include "intel_atomic_plane.h"
66 #include "intel_bw.h"
67 #include "intel_cdclk.h"
68 #include "intel_color.h"
69 #include "intel_display_types.h"
70 #include "intel_dp_link_training.h"
71 #include "intel_fbc.h"
72 #include "intel_fbdev.h"
73 #include "intel_fifo_underrun.h"
74 #include "intel_frontbuffer.h"
75 #include "intel_hdcp.h"
76 #include "intel_hotplug.h"
77 #include "intel_overlay.h"
78 #include "intel_pipe_crc.h"
79 #include "intel_pm.h"
80 #include "intel_psr.h"
81 #include "intel_quirks.h"
82 #include "intel_sideband.h"
83 #include "intel_sprite.h"
84 #include "intel_tc.h"
85 #include "intel_vga.h"
86
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for ivb (no fp16 due to hw issue) */
static const u32 ivb_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Primary plane formats for gen >= 4, except ivb */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

/* Primary plane formats for vlv/chv (adds alpha variants) */
static const u32 vlv_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

/* Modifiers for the i9xx-style planes above; INVALID terminates the list. */
static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

/* Cursor planes only advertise linear buffers. */
static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
146
147 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
148                                 struct intel_crtc_state *pipe_config);
149 static void ilk_pch_clock_get(struct intel_crtc *crtc,
150                               struct intel_crtc_state *pipe_config);
151
152 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
153                                   struct drm_i915_gem_object *obj,
154                                   struct drm_mode_fb_cmd2 *mode_cmd);
155 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
156 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
157 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
158                                          const struct intel_link_m_n *m_n,
159                                          const struct intel_link_m_n *m2_n2);
160 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
161 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
162 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
163 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
164 static void vlv_prepare_pll(struct intel_crtc *crtc,
165                             const struct intel_crtc_state *pipe_config);
166 static void chv_prepare_pll(struct intel_crtc *crtc,
167                             const struct intel_crtc_state *pipe_config);
168 static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
169 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
170 static void intel_modeset_setup_hw_state(struct drm_device *dev,
171                                          struct drm_modeset_acquire_ctx *ctx);
172 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
173
/*
 * Valid divider ranges for a DPLL on a given platform/output combination.
 * The tables below use these in kHz for .dot/.vco; the p2 post divider is
 * chosen by comparing the target clock against .p2.dot_limit (see
 * i9xx_select_p2_div()).
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
184
185 /* returns HPLL frequency in kHz */
186 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
187 {
188         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
189
190         /* Obtain SKU information */
191         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
192                 CCK_FUSE_HPLL_FREQ_MASK;
193
194         return vco_freq[hpll_freq] * 1000;
195 }
196
197 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
198                       const char *name, u32 reg, int ref_freq)
199 {
200         u32 val;
201         int divider;
202
203         val = vlv_cck_read(dev_priv, reg);
204         divider = val & CCK_FREQUENCY_VALUES;
205
206         WARN((val & CCK_FREQUENCY_STATUS) !=
207              (divider << CCK_FREQUENCY_STATUS_SHIFT),
208              "%s change in progress\n", name);
209
210         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
211 }
212
213 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
214                            const char *name, u32 reg)
215 {
216         int hpll;
217
218         vlv_cck_get(dev_priv);
219
220         if (dev_priv->hpll_freq == 0)
221                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
222
223         hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
224
225         vlv_cck_put(dev_priv);
226
227         return hpll;
228 }
229
230 static void intel_update_czclk(struct drm_i915_private *dev_priv)
231 {
232         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
233                 return;
234
235         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
236                                                       CCK_CZ_CLOCK_CONTROL);
237
238         drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
239                 dev_priv->czclk_freq);
240 }
241
242 static inline u32 /* units of 100MHz */
243 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
244                     const struct intel_crtc_state *pipe_config)
245 {
246         if (HAS_DDI(dev_priv))
247                 return pipe_config->port_clock; /* SPLL */
248         else
249                 return dev_priv->fdi_pll_freq;
250 }
251
/* i8xx (gen2) DPLL limits, per output type; .dot/.vco in kHz. */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* Same as the DAC limits except p2 stays at 4 above the dot limit. */
static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};
290
/* i9xx (gen3) DPLL limits; .dot/.vco in kHz. */
static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
316
317
/* g4x DPLL limits; .dot/.vco in kHz. */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* dot_limit == 0 makes p2 selection unconditional for LVDS. */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};
373
/* Pineview DPLL limits; .dot/.vco in kHz. */
static const struct intel_limit pnv_limits_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit pnv_limits_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
401
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit ilk_limits_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit ilk_limits_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit ilk_limits_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit ilk_limits_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit ilk_limits_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
472
static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 values are stored shifted left by 22 bits (fractional m2) */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
516
517 /* WA Display #0827: Gen9:all */
518 static void
519 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
520 {
521         if (enable)
522                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
523                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
524         else
525                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
526                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
527 }
528
529 /* Wa_2006604312:icl */
530 static void
531 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
532                        bool enable)
533 {
534         if (enable)
535                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
536                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
537         else
538                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
539                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
540 }
541
/* Whether the committed uapi state requires a full modeset on this CRTC. */
static bool
needs_modeset(const struct intel_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(&state->uapi);
}
547
548 bool
549 is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
550 {
551         return (crtc_state->master_transcoder != INVALID_TRANSCODER ||
552                 crtc_state->sync_mode_slaves_mask);
553 }
554
/* A port sync slave is a CRTC with a valid master transcoder assigned. */
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
560
561 /*
562  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
563  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
564  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
565  * The helpers' return value is the rate of the clock that is fed to the
566  * display engine's pipe which can be the above fast dot clock rate or a
567  * divided-down version of it.
568  */
569 /* m1 is reserved as 0 in Pineview, n is a ring counter */
570 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
571 {
572         clock->m = clock->m2 + 2;
573         clock->p = clock->p1 * clock->p2;
574         if (WARN_ON(clock->n == 0 || clock->p == 0))
575                 return 0;
576         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
577         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
578
579         return clock->dot;
580 }
581
582 static u32 i9xx_dpll_compute_m(struct dpll *dpll)
583 {
584         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
585 }
586
587 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
588 {
589         clock->m = i9xx_dpll_compute_m(clock);
590         clock->p = clock->p1 * clock->p2;
591         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
592                 return 0;
593         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
594         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
595
596         return clock->dot;
597 }
598
599 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
600 {
601         clock->m = clock->m1 * clock->m2;
602         clock->p = clock->p1 * clock->p2;
603         if (WARN_ON(clock->n == 0 || clock->p == 0))
604                 return 0;
605         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
606         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
607
608         return clock->dot / 5;
609 }
610
611 int chv_calc_dpll_params(int refclk, struct dpll *clock)
612 {
613         clock->m = clock->m1 * clock->m2;
614         clock->p = clock->p1 * clock->p2;
615         if (WARN_ON(clock->n == 0 || clock->p == 0))
616                 return 0;
617         clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
618                                            clock->n << 22);
619         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
620
621         return clock->dot / 5;
622 }
623
/*
 * Bail out of the enclosing validity check; the commented-out debug
 * message records which constraint failed.
 */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
                               const struct intel_limit *limit,
                               const struct dpll *clock)
{
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* m1 > m2 required except where m1 is unused or m2 is fractional. */
	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* The vlv/chv/bxt limit tables carry no combined m/p ranges. */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
666
667 static int
668 i9xx_select_p2_div(const struct intel_limit *limit,
669                    const struct intel_crtc_state *crtc_state,
670                    int target)
671 {
672         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
673
674         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
675                 /*
676                  * For LVDS just rely on its current settings for dual-channel.
677                  * We haven't figured out how to reliably set up different
678                  * single/dual channel state, if we even can.
679                  */
680                 if (intel_is_dual_link_lvds(dev_priv))
681                         return limit->p2.p2_fast;
682                 else
683                         return limit->p2.p2_slow;
684         } else {
685                 if (target < limit->p2.dot_limit)
686                         return limit->p2.p2_slow;
687                 else
688                         return limit->p2.p2_fast;
689         }
690 }
691
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
                    struct intel_crtc_state *crtc_state,
                    int target, int refclk, struct dpll *match_clock,
                    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/*
	 * Exhaustively walk the m1/m2/n/p1 space within the limits,
	 * keeping the combination whose dot clock is closest to target.
	 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m2 >= m1 would be rejected by the validity check */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* err started at target, so any accepted candidate means success. */
	return (err != target);
}
749
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/*
	 * Same exhaustive walk as i9xx_find_best_dpll(), but using the
	 * Pineview clock equation and without the m1 > m2 short-circuit
	 * (m1 is unused on Pineview).
	 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* err started at target, so any accepted candidate means success. */
	return (err != target);
}
805
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 *
 * NOTE(review): unlike pnv_find_best_dpll(), @match_clock is accepted
 * here but never checked against the candidates — confirm whether the
 * LVDS downclock constraint is intentionally skipped on g4x.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* lock in this n: larger n can never be preferred now */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
864
/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Returns true if it is; the
 * calculated error is passed back via @error_ppm.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	/* deviation from the requested frequency in parts per million */
	*error_ppm = div_u64(1000000ULL *
				abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	/* require a meaningful (>10 ppm) improvement over the previous best */
	return *error_ppm + 10 < best_error_ppm;
}
904
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	/* best error so far, in ppm; seeded with the worst possible value */
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/*
					 * Solve for m2 directly instead of
					 * iterating over it, then recompute
					 * the resulting clock for validation.
					 */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
964
965 /*
966  * Returns a set of divisors for the desired target clock with the given
967  * refclk, or FALSE.  The returned values represent the clock equation:
968  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
969  */
970 static bool
971 chv_find_best_dpll(const struct intel_limit *limit,
972                    struct intel_crtc_state *crtc_state,
973                    int target, int refclk, struct dpll *match_clock,
974                    struct dpll *best_clock)
975 {
976         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
977         struct drm_device *dev = crtc->base.dev;
978         unsigned int best_error_ppm;
979         struct dpll clock;
980         u64 m2;
981         int found = false;
982
983         memset(best_clock, 0, sizeof(*best_clock));
984         best_error_ppm = 1000000;
985
986         /*
987          * Based on hardware doc, the n always set to 1, and m1 always
988          * set to 2.  If requires to support 200Mhz refclk, we need to
989          * revisit this because n may not 1 anymore.
990          */
991         clock.n = 1, clock.m1 = 2;
992         target *= 5;    /* fast clock */
993
994         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
995                 for (clock.p2 = limit->p2.p2_fast;
996                                 clock.p2 >= limit->p2.p2_slow;
997                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
998                         unsigned int error_ppm;
999
1000                         clock.p = clock.p1 * clock.p2;
1001
1002                         m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
1003                                                    refclk * clock.m1);
1004
1005                         if (m2 > INT_MAX/clock.m1)
1006                                 continue;
1007
1008                         clock.m2 = m2;
1009
1010                         chv_calc_dpll_params(refclk, &clock);
1011
1012                         if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
1013                                 continue;
1014
1015                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1016                                                 best_error_ppm, &error_ppm))
1017                                 continue;
1018
1019                         *best_clock = clock;
1020                         best_error_ppm = error_ppm;
1021                         found = true;
1022                 }
1023         }
1024
1025         return found;
1026 }
1027
1028 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
1029                         struct dpll *best_clock)
1030 {
1031         int refclk = 100000;
1032         const struct intel_limit *limit = &intel_limits_bxt;
1033
1034         return chv_find_best_dpll(limit, crtc_state,
1035                                   crtc_state->port_clock, refclk,
1036                                   NULL, best_clock);
1037 }
1038
1039 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1040                                     enum pipe pipe)
1041 {
1042         i915_reg_t reg = PIPEDSL(pipe);
1043         u32 line1, line2;
1044         u32 line_mask;
1045
1046         if (IS_GEN(dev_priv, 2))
1047                 line_mask = DSL_LINEMASK_GEN2;
1048         else
1049                 line_mask = DSL_LINEMASK_GEN3;
1050
1051         line1 = intel_de_read(dev_priv, reg) & line_mask;
1052         msleep(5);
1053         line2 = intel_de_read(dev_priv, reg) & line_mask;
1054
1055         return line1 != line2;
1056 }
1057
/*
 * Wait up to 100 ms for the pipe's scanline counter to reach the
 * requested state: moving (@state == true) or stopped (@state == false).
 * Logs an error on timeout but does not fail hard.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), onoff(state));
}
1069
/* Wait for the pipe's scanline counter to stop advancing (pipe off). */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
1074
/* Wait for the pipe's scanline counter to start advancing (pipe on). */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
1079
/*
 * Wait for the pipe in @old_crtc_state to actually turn off.  On gen4+
 * the PIPECONF state bit is polled directly; older platforms have no
 * such bit, so fall back to watching the scanline counter stop.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
1098
/* Only for pre-ILK configs */
/* Warn if the DPLL for @pipe is not in the expected enabled/disabled @state. */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1112
/* XXX: the dsi pll is shared between MIPI DSI ports */
/* Warn if the DSI PLL enable bit does not match the expected @state. */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	/* DSI PLL lives behind the CCK sideband; take the lock for the read */
	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1128
/* Warn if the FDI TX enable state for @pipe does not match @state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		u32 val = intel_de_read(dev_priv,
					TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1155
/* Warn if the FDI RX enable state for @pipe does not match @state. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1170
/*
 * Warn if the FDI TX PLL for @pipe is disabled on platforms where the
 * driver is responsible for it (not ILK, not DDI platforms).
 */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1187
/* Warn if the FDI RX PLL enable state for @pipe does not match @state. */
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1200
/*
 * Warn if the panel power sequencer registers for the panel driven by
 * @pipe are write-locked.  The PP registers are considered locked only
 * when the panel is powered on AND the unlock key is not programmed;
 * panel-off or an unlocked key both count as writable.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	/* DDI platforms are handled elsewhere; getting here is a driver bug */
	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		/* Figure out which port (and thus pipe) the panel is on */
		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* pre-PCH-split panels are expected to be LVDS only */
		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1257
/*
 * Warn if @cpu_transcoder's PIPECONF enable state does not match @state.
 * A powered-down transcoder is treated as disabled rather than read.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	/* Only touch the register if its power domain is actually up */
	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}
1285
/* Warn if @plane's hardware enable state does not match the expected @state. */
static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)
1300
/* Warn if any plane on @crtc is still enabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
1309
/*
 * Warn if vblank interrupts are still enabled on @crtc.
 * drm_crtc_vblank_get() returning 0 means it succeeded (vblanks on),
 * which is the failure case here; drop the reference we just took.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1315
/* Warn if the PCH transcoder for @pipe is still enabled. */
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}
1328
/*
 * Warn if the PCH DP port @port is enabled on @pipe's transcoder, or if
 * a disabled port on IBX is still parked on transcoder B (HW quirk).
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}
1346
/*
 * Warn if the PCH HDMI port @port is enabled on @pipe's transcoder, or if
 * a disabled port on IBX is still parked on transcoder B (HW quirk).
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}
1364
/* Warn if any PCH port (DP, VGA, LVDS, HDMI/SDVO) is enabled on @pipe. */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1389
/*
 * Write the DPLL control value and wait for the VLV PLL to lock.
 * Callers are expected to have done the pipe/panel preconditions
 * (see vlv_enable_pll()).
 */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	/* posting read + delay before polling for lock */
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
}
1403
/*
 * Enable the DPLL for @crtc on VLV: verify preconditions, enable the
 * VCO if the state asks for it, then program DPLL_MD.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* only spin up the VCO when the state actually wants the PLL on */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	intel_de_write(dev_priv, DPLL_MD(pipe),
		       pipe_config->dpll_hw_state.dpll_md);
	intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}
1422
1423
/*
 * Enable the CHV PLL proper: turn the 10-bit DPIO clock back on,
 * then enable the PLL and wait for lock.  Preconditions are handled
 * by chv_enable_pll().
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
}
1453
/*
 * Enable the DPLL for @crtc on CHV: verify preconditions, enable the
 * VCO if requested, then program DPLL_MD — going through the CBR4
 * chicken-bit workaround for pipes B/C where DPLL_MD is broken.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		intel_de_write(dev_priv, DPLL_MD(PIPE_B),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_write(dev_priv, CBR4_VLV, 0);
		/* cache the value the HW register can no longer report */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		intel_de_write(dev_priv, DPLL_MD(pipe),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_posting_read(dev_priv, DPLL_MD(pipe));
	}
}
1492
1493 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1494 {
1495         if (IS_I830(dev_priv))
1496                 return false;
1497
1498         return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1499 }
1500
/*
 * Enable the DPLL for @crtc on gen2-4 parts, following the documented
 * write sequence: VGA-mode-enabled write first, then the real value,
 * DPLL_MD (or a rewrite on pre-gen4), and three warm-up rewrites.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, reg, dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		intel_de_write(dev_priv, DPLL_MD(crtc->pipe),
			       crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		intel_de_write(dev_priv, reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(dev_priv, reg, dpll);
		intel_de_posting_read(dev_priv, reg);
		udelay(150); /* wait for warmup */
	}
}
1546
/*
 * Disable the DPLL for @crtc_state's pipe on gen2-4 parts, leaving only
 * VGA-mode-disable set.  No-op on 830, whose pipes stay enabled.
 */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
1563
/*
 * Disable the DPLL for @pipe on Valleyview.
 *
 * The DPLL is not fully cleared: the integrated reference clock is
 * kept enabled, and for pipes other than A the CRI clock bit is kept
 * set as well.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	intel_de_write(dev_priv, DPLL(pipe), val);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
1579
/*
 * Disable the DPLL for @pipe on Cherryview.
 *
 * Keeps the SSC reference clock enabled (and, for pipes other than A,
 * the CRI clock bit), then drops the 10bit DPIO clock feed to the
 * display controller via the common lane registers.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	intel_de_write(dev_priv, DPLL(pipe), val);
	intel_de_posting_read(dev_priv, DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}
1605
/*
 * Wait for a VLV/CHV PHY port to report ready.
 *
 * @expected_mask: expected ready bits within the port's status field.
 * Ports B and C report in DPLL(0) (port C's bits are shifted up by 4),
 * port D reports in DPIO_PHY_STATUS. Warns after a 1 ms timeout.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* port C status bits sit 4 bits above port B's */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
		     dport->base.base.base.id, dport->base.base.name,
		     intel_de_read(dev_priv, dpll_reg) & port_mask,
		     expected_mask);
}
1638
/*
 * Enable the PCH transcoder for the crtc's pipe (ILK/SNB/IVB style
 * PCH). Requires the shared DPLL and both FDI directions to already be
 * running. Copies interlace mode and (on IBX) BPC from the CPU pipe
 * config into the transcoder, applying the CPT timing-override
 * workaround first.
 */
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
			pipe_name(pipe));
}
1705
/*
 * Enable the single LPT PCH transcoder (LPT_TRANSCONF). LPT hardwires
 * the transcoder to pipe A's FDI, hence the PIPE_A assertions and
 * chicken register. Interlace mode is copied from the CPU transcoder's
 * PIPECONF.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}
1737
/*
 * Disable the PCH transcoder for @pipe. FDI and the PCH ports must be
 * off first. On CPT the timing-override chicken bit set at enable time
 * is cleared again afterwards.
 */
static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
			pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		intel_de_write(dev_priv, reg, val);
	}
}
1768
/*
 * Disable the single LPT PCH transcoder and clear the timing-override
 * chicken bit that lpt_enable_pch_transcoder() set.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}
1786
1787 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1788 {
1789         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1790
1791         if (HAS_PCH_LPT(dev_priv))
1792                 return PIPE_A;
1793         else
1794                 return crtc->pipe;
1795 }
1796
1797 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1798 {
1799         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1800
1801         /*
1802          * On i965gm the hardware frame counter reads
1803          * zero when the TV encoder is enabled :(
1804          */
1805         if (IS_I965GM(dev_priv) &&
1806             (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1807                 return 0;
1808
1809         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1810                 return 0xffffffff; /* full 32 bit counter */
1811         else if (INTEL_GEN(dev_priv) >= 3)
1812                 return 0xffffff; /* only 24 bits of frame count */
1813         else
1814                 return 0; /* Gen2 doesn't have a hardware frame counter */
1815 }
1816
/*
 * Turn on vblank interrupts/processing for the crtc. The max hw frame
 * counter value must be programmed while vblanks are still disabled,
 * hence the ordering here.
 */
void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	assert_vblank_disabled(&crtc->base);
	drm_crtc_set_max_vblank_count(&crtc->base,
				      intel_crtc_max_vblank_count(crtc_state));
	drm_crtc_vblank_on(&crtc->base);
}
1826
/* Turn off vblank processing for the crtc and assert it took effect. */
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	drm_crtc_vblank_off(&crtc->base);
	assert_vblank_disabled(&crtc->base);
}
1834
/*
 * Enable the pipe for the given crtc state. Planes must be disabled
 * and the relevant PLL (pipe PLL / DSI PLL on GMCH, FDI PLLs when
 * driving the PCH) must already be running. On 830 the pipe may
 * already be enabled since both pipes are kept on there.
 */
void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		WARN_ON(!IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1892
/*
 * Disable the pipe for the given (old) crtc state. Planes must already
 * be off. On 830 the pipe is left enabled (only double-wide is
 * dropped), so the final wait for pipe-off only happens when the
 * enable bit was actually cleared.
 */
void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	trace_intel_pipe_disable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	intel_de_write(dev_priv, reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1932
/* Size of one tile page in bytes: 2 KiB on gen2, 4 KiB otherwise. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
1937
1938 static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
1939 {
1940         if (!is_ccs_modifier(fb->modifier))
1941                 return false;
1942
1943         return plane >= fb->format->num_planes / 2;
1944 }
1945
1946 static bool is_gen12_ccs_modifier(u64 modifier)
1947 {
1948         return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
1949                modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
1950
1951 }
1952
1953 static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
1954 {
1955         return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
1956 }
1957
1958 static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
1959 {
1960         if (is_ccs_modifier(fb->modifier))
1961                 return is_ccs_plane(fb, plane);
1962
1963         return plane == 1;
1964 }
1965
1966 static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
1967 {
1968         WARN_ON(!is_ccs_modifier(fb->modifier) ||
1969                 (main_plane && main_plane >= fb->format->num_planes / 2));
1970
1971         return fb->format->num_planes / 2 + main_plane;
1972 }
1973
1974 static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
1975 {
1976         WARN_ON(!is_ccs_modifier(fb->modifier) ||
1977                 ccs_plane < fb->format->num_planes / 2);
1978
1979         return ccs_plane - fb->format->num_planes / 2;
1980 }
1981
1982 /* Return either the main plane's CCS or - if not a CCS FB - UV plane */
1983 int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
1984 {
1985         if (is_ccs_modifier(fb->modifier))
1986                 return main_to_ccs_plane(fb, main_plane);
1987
1988         return 1;
1989 }
1990
1991 bool
1992 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
1993                                     uint64_t modifier)
1994 {
1995         return info->is_yuv &&
1996                info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
1997 }
1998
1999 static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
2000                                    int color_plane)
2001 {
2002         return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
2003                color_plane == 1;
2004 }
2005
/*
 * Width of one tile in bytes for @color_plane given the fb's tiling
 * modifier. Linear fbs are treated as one full page wide. CCS planes
 * get their own (narrower) widths; non-CCS planes of CCS modifiers
 * fall through to the underlying Y/Yf tiling cases.
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 64;
		/* fall through */
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width depends on the bytes per pixel */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}
2058
2059 static unsigned int
2060 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
2061 {
2062         if (is_gen12_ccs_plane(fb, color_plane))
2063                 return 1;
2064
2065         return intel_tile_size(to_i915(fb->dev)) /
2066                 intel_tile_width_bytes(fb, color_plane);
2067 }
2068
2069 /* Return the tile dimensions in pixel units */
2070 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
2071                             unsigned int *tile_width,
2072                             unsigned int *tile_height)
2073 {
2074         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
2075         unsigned int cpp = fb->format->cpp[color_plane];
2076
2077         *tile_width = tile_width_bytes / cpp;
2078         *tile_height = intel_tile_height(fb, color_plane);
2079 }
2080
2081 static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
2082                                         int color_plane)
2083 {
2084         unsigned int tile_width, tile_height;
2085
2086         intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2087
2088         return fb->pitches[color_plane] * tile_height;
2089 }
2090
/* Round @height up to a whole number of tile rows for @color_plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
2099
2100 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2101 {
2102         unsigned int size = 0;
2103         int i;
2104
2105         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2106                 size += rot_info->plane[i].width * rot_info->plane[i].height;
2107
2108         return size;
2109 }
2110
2111 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
2112 {
2113         unsigned int size = 0;
2114         int i;
2115
2116         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
2117                 size += rem_info->plane[i].width * rem_info->plane[i].height;
2118
2119         return size;
2120 }
2121
2122 static void
2123 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2124                         const struct drm_framebuffer *fb,
2125                         unsigned int rotation)
2126 {
2127         view->type = I915_GGTT_VIEW_NORMAL;
2128         if (drm_rotation_90_or_270(rotation)) {
2129                 view->type = I915_GGTT_VIEW_ROTATED;
2130                 view->rotated = to_intel_framebuffer(fb)->rot_info;
2131         }
2132 }
2133
/* GGTT alignment required for cursor buffers on this platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
2145
/* GGTT alignment required for linear framebuffers on this platform. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
2158
/*
 * GGTT alignment required for the surface of @color_plane, depending
 * on the tiling modifier, platform, and whether the plane is an
 * AUX/CCS or semiplanar UV plane.
 */
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
					 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
	    is_ccs_plane(fb, color_plane))
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_GEN(dev_priv) >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		/* semiplanar UV planes align to a full tile row */
		if (is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		/* Fall-through */
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
		return 16 * 1024;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
		if (INTEL_GEN(dev_priv) >= 12 &&
		    is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		/* Fall-through */
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}
2196
2197 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2198 {
2199         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2200         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2201
2202         return INTEL_GEN(dev_priv) < 4 ||
2203                 (plane->has_fbc &&
2204                  plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2205 }
2206
/*
 * Pin a framebuffer's backing object into the GGTT for scanout and,
 * when requested and possible, fence it.
 *
 * Returns the pinned vma with an extra reference taken, or an ERR_PTR
 * on failure. PLANE_HAS_FENCE is OR'ed into @out_flags when a fence
 * was installed. Pre-gen4 hardware requires the fence, so a fencing
 * failure there fails the whole pin; on newer hardware scanout simply
 * proceeds unfenced.
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	/* alignment requirement of the main surface */
	alignment = intel_surf_alignment(fb, 0);
	if (WARN_ON(alignment && !is_power_of_2(alignment)))
		return ERR_PTR(-EINVAL);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/*
	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	pinctl = 0;
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/*
		 * Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* mandatory fence failed on pre-gen4: unwind the pin */
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}
2301
/*
 * Release a framebuffer vma pinned by intel_pin_and_fence_fb_obj():
 * drop its fence (when PLANE_HAS_FENCE is set in @flags) and display
 * pin under the object lock, then drop the extra vma reference.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	i915_gem_object_lock(vma->obj);
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_gem_object_unlock(vma->obj);

	i915_vma_put(vma);
}
2312
2313 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2314                           unsigned int rotation)
2315 {
2316         if (drm_rotation_90_or_270(rotation))
2317                 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2318         else
2319                 return fb->pitches[color_plane];
2320 }
2321
2322 /*
2323  * Convert the x/y offsets into a linear offset.
2324  * Only valid with 0/180 degree rotation, which is fine since linear
2325  * offset is only used with linear buffers on pre-hsw and tiled buffers
2326  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2327  */
2328 u32 intel_fb_xy_to_linear(int x, int y,
2329                           const struct intel_plane_state *state,
2330                           int color_plane)
2331 {
2332         const struct drm_framebuffer *fb = state->hw.fb;
2333         unsigned int cpp = fb->format->cpp[color_plane];
2334         unsigned int pitch = state->color_plane[color_plane].stride;
2335
2336         return y * pitch + x * cpp;
2337 }
2338
2339 /*
2340  * Add the x/y offsets derived from fb->offsets[] to the user
2341  * specified plane src x/y offsets. The resulting x/y offsets
2342  * specify the start of scanout from the beginning of the gtt mapping.
2343  */
2344 void intel_add_fb_offsets(int *x, int *y,
2345                           const struct intel_plane_state *state,
2346                           int color_plane)
2347
2348 {
2349         *x += state->color_plane[color_plane].x;
2350         *y += state->color_plane[color_plane].y;
2351 }
2352
2353 static u32 intel_adjust_tile_offset(int *x, int *y,
2354                                     unsigned int tile_width,
2355                                     unsigned int tile_height,
2356                                     unsigned int tile_size,
2357                                     unsigned int pitch_tiles,
2358                                     u32 old_offset,
2359                                     u32 new_offset)
2360 {
2361         unsigned int pitch_pixels = pitch_tiles * tile_width;
2362         unsigned int tiles;
2363
2364         WARN_ON(old_offset & (tile_size - 1));
2365         WARN_ON(new_offset & (tile_size - 1));
2366         WARN_ON(new_offset > old_offset);
2367
2368         tiles = (old_offset - new_offset) / tile_size;
2369
2370         *y += tiles / pitch_tiles * tile_height;
2371         *x += tiles % pitch_tiles * tile_width;
2372
2373         /* minimize x in case it got needlessly big */
2374         *y += *x / pitch_pixels * tile_height;
2375         *x %= pitch_pixels;
2376
2377         return new_offset;
2378 }
2379
2380 static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
2381 {
2382         return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
2383                is_gen12_ccs_plane(fb, color_plane);
2384 }
2385
/*
 * Rebase the fb plane from @old_offset to @new_offset by folding the
 * difference into the *@x/*@y offsets. Returns @new_offset.
 *
 * @pitch is in bytes for linear/0-180 degrees, and in tile-aligned fb
 * height units for 90/270 (see intel_compute_aligned_offset()).
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	/* We only ever rebase to a smaller (earlier) offset. */
	WARN_ON(new_offset > old_offset);

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/*
			 * Rotated view: pitch is the tile aligned fb height,
			 * and the tile dims are swapped to match the GTT view.
			 */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		/* Linear: fold current x/y in, then split the delta back out. */
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
2424
2425 /*
2426  * Adjust the tile offset by moving the difference into
2427  * the x/y offsets.
2428  */
2429 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2430                                              const struct intel_plane_state *state,
2431                                              int color_plane,
2432                                              u32 old_offset, u32 new_offset)
2433 {
2434         return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
2435                                            state->hw.rotation,
2436                                            state->color_plane[color_plane].stride,
2437                                            old_offset, new_offset);
2438 }
2439
2440 /*
2441  * Computes the aligned offset to the base tile and adjusts
2442  * x, y. bytes per pixel is assumed to be a power-of-two.
2443  *
2444  * In the 90/270 rotated case, x and y are assumed
2445  * to be already rotated to match the rotated GTT view, and
2446  * pitch is the tile_height aligned framebuffer height.
2447  *
2448  * This function is used when computing the derived information
2449  * under intel_framebuffer, so using any of that information
2450  * here is not allowed. Anything under drm_framebuffer can be
2451  * used. This is why the user has to pass in the pitch since it
2452  * is specified in the rotated orientation.
2453  */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
					int *x, int *y,
					const struct drm_framebuffer *fb,
					int color_plane,
					unsigned int pitch,
					unsigned int rotation,
					u32 alignment)
{
	unsigned int cpp = fb->format->cpp[color_plane];
	u32 offset, offset_aligned;

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* pitch is the tile aligned fb height here */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* Split x/y into whole tiles and intra-tile remainders. */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		/* Byte offset of the tile containing (x,y). */
		offset = (tile_rows * pitch_tiles + tiles) * tile_size;

		offset_aligned = offset;
		if (alignment)
			offset_aligned = rounddown(offset_aligned, alignment);

		/* Push the rounded-off part back into the x/y offsets. */
		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset;
		if (alignment) {
			offset_aligned = rounddown(offset_aligned, alignment);
			/* Remainder below the alignment becomes x/y. */
			*y = (offset % alignment) / pitch;
			*x = ((offset % alignment) - *y * pitch) / cpp;
		} else {
			/* No alignment: everything goes into the offset. */
			*y = *x = 0;
		}
	}

	return offset_aligned;
}
2508
2509 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2510                                               const struct intel_plane_state *state,
2511                                               int color_plane)
2512 {
2513         struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
2514         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2515         const struct drm_framebuffer *fb = state->hw.fb;
2516         unsigned int rotation = state->hw.rotation;
2517         int pitch = state->color_plane[color_plane].stride;
2518         u32 alignment;
2519
2520         if (intel_plane->id == PLANE_CURSOR)
2521                 alignment = intel_cursor_alignment(dev_priv);
2522         else
2523                 alignment = intel_surf_alignment(fb, color_plane);
2524
2525         return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2526                                             pitch, rotation, alignment);
2527 }
2528
2529 /* Convert the fb->offset[] into x/y offsets */
/* Convert the fb->offset[] into x/y offsets */
static int intel_fb_offset_to_xy(int *x, int *y,
				 const struct drm_framebuffer *fb,
				 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int height;
	u32 alignment;

	/*
	 * Pick the required base alignment: tile row for gen12+
	 * semiplanar UV planes, tile size for other tiled surfaces,
	 * none for linear.
	 */
	if (INTEL_GEN(dev_priv) >= 12 &&
	    is_semiplanar_uv_plane(fb, color_plane))
		alignment = intel_tile_row_size(fb, color_plane);
	else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
		alignment = intel_tile_size(dev_priv);
	else
		alignment = 0;

	if (alignment != 0 && fb->offsets[color_plane] % alignment) {
		drm_dbg_kms(&dev_priv->drm,
			    "Misaligned offset 0x%08x for color plane %d\n",
			    fb->offsets[color_plane], color_plane);
		return -EINVAL;
	}

	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
	height = ALIGN(height, intel_tile_height(fb, color_plane));

	/* Catch potential overflows early */
	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
			    fb->offsets[color_plane])) {
		drm_dbg_kms(&dev_priv->drm,
			    "Bad offset 0x%08x or pitch %d for color plane %d\n",
			    fb->offsets[color_plane], fb->pitches[color_plane],
			    color_plane);
		return -ERANGE;
	}

	*x = 0;
	*y = 0;

	/* Fold the whole byte offset into the x/y offsets. */
	intel_adjust_aligned_offset(x, y,
				    fb, color_plane, DRM_MODE_ROTATE_0,
				    fb->pitches[color_plane],
				    fb->offsets[color_plane], 0);

	return 0;
}
2576
2577 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2578 {
2579         switch (fb_modifier) {
2580         case I915_FORMAT_MOD_X_TILED:
2581                 return I915_TILING_X;
2582         case I915_FORMAT_MOD_Y_TILED:
2583         case I915_FORMAT_MOD_Y_TILED_CCS:
2584         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2585         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
2586                 return I915_TILING_Y;
2587         default:
2588                 return I915_TILING_NONE;
2589         }
2590 }
2591
2592 /*
2593  * From the Sky Lake PRM:
2594  * "The Color Control Surface (CCS) contains the compression status of
2595  *  the cache-line pairs. The compression state of the cache-line pair
2596  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
2597  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2598  *  cache-line-pairs. CCS is always Y tiled."
2599  *
2600  * Since cache line pairs refers to horizontally adjacent cache lines,
2601  * each cache line in the CCS corresponds to an area of 32x16 cache
2602  * lines on the main surface. Since each pixel is 4 bytes, this gives
2603  * us a ratio of one byte in the CCS for each 8x16 pixels in the
2604  * main surface.
2605  */
static const struct drm_format_info skl_ccs_formats[] = {
	/* Plane 1 is the CCS: 1 byte per 8x16 main surface pixels (see above). */
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
2616
2617 /*
2618  * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
2619  * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
2620  * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
2621  * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
2622  * the main surface.
2623  */
static const struct drm_format_info gen12_ccs_formats[] = {
	/* 32bpp RGB: plane 1 is the CCS, 1 byte per 2x32 main pixels (see above). */
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	/* Packed YUV (2 bytes/pixel): plane 1 is the CCS. */
	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	/* Semiplanar YUV: planes 0/1 are Y/UV, planes 2/3 their CCS planes. */
	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P010, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P012, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P016, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
};
2662
2663 static const struct drm_format_info *
2664 lookup_format_info(const struct drm_format_info formats[],
2665                    int num_formats, u32 format)
2666 {
2667         int i;
2668
2669         for (i = 0; i < num_formats; i++) {
2670                 if (formats[i].format == format)
2671                         return &formats[i];
2672         }
2673
2674         return NULL;
2675 }
2676
/*
 * Return the driver specific format info for CCS modifiers, which carry
 * extra AUX planes the core format info doesn't describe. Returns NULL
 * for other modifiers (presumably the core default info is used then —
 * verify against the caller).
 */
static const struct drm_format_info *
intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	switch (cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return lookup_format_info(skl_ccs_formats,
					  ARRAY_SIZE(skl_ccs_formats),
					  cmd->pixel_format);
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return lookup_format_info(gen12_ccs_formats,
					  ARRAY_SIZE(gen12_ccs_formats),
					  cmd->pixel_format);
	default:
		return NULL;
	}
}
2695
2696 bool is_ccs_modifier(u64 modifier)
2697 {
2698         return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
2699                modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
2700                modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2701                modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2702 }
2703
2704 static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
2705 {
2706         return DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)],
2707                             512) * 64;
2708 }
2709
2710 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2711                               u32 pixel_format, u64 modifier)
2712 {
2713         struct intel_crtc *crtc;
2714         struct intel_plane *plane;
2715
2716         /*
2717          * We assume the primary plane for pipe A has
2718          * the highest stride limits of them all.
2719          */
2720         crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
2721         if (!crtc)
2722                 return 0;
2723
2724         plane = to_intel_plane(crtc->base.primary);
2725
2726         return plane->max_stride(plane, pixel_format, modifier,
2727                                  DRM_MODE_ROTATE_0);
2728 }
2729
2730 static
2731 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2732                         u32 pixel_format, u64 modifier)
2733 {
2734         /*
2735          * Arbitrary limit for gen4+ chosen to match the
2736          * render engine max stride.
2737          *
2738          * The new CCS hash mode makes remapping impossible
2739          */
2740         if (!is_ccs_modifier(modifier)) {
2741                 if (INTEL_GEN(dev_priv) >= 7)
2742                         return 256*1024;
2743                 else if (INTEL_GEN(dev_priv) >= 4)
2744                         return 128*1024;
2745         }
2746
2747         return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2748 }
2749
/* Required stride alignment (in bytes) for the given fb color plane. */
static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	u32 tile_width;

	if (is_surface_linear(fb, color_plane)) {
		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
							   fb->format->format,
							   fb->modifier);

		/*
		 * To make remapping with linear generally feasible
		 * we need the stride to be page aligned.
		 */
		if (fb->pitches[color_plane] > max_stride &&
		    !is_ccs_modifier(fb->modifier))
			return intel_tile_size(dev_priv);
		else
			return 64;
	}

	/* Tiled surfaces: stride must be a multiple of the tile width. */
	tile_width = intel_tile_width_bytes(fb, color_plane);
	if (is_ccs_modifier(fb->modifier)) {
		/*
		 * Display WA #0531: skl,bxt,kbl,glk
		 *
		 * Render decompression and plane width > 3840
		 * combined with horizontal panning requires the
		 * plane stride to be a multiple of 4. We'll just
		 * require the entire fb to accommodate that to avoid
		 * potential runtime errors at plane configuration time.
		 */
		if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
			tile_width *= 4;
		/*
		 * The main surface pitch must be padded to a multiple of four
		 * tile widths.
		 */
		else if (INTEL_GEN(dev_priv) >= 12)
			tile_width *= 4;
	}
	return tile_width;
}
2794
2795 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2796 {
2797         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2798         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2799         const struct drm_framebuffer *fb = plane_state->hw.fb;
2800         int i;
2801
2802         /* We don't want to deal with remapping with cursors */
2803         if (plane->id == PLANE_CURSOR)
2804                 return false;
2805
2806         /*
2807          * The display engine limits already match/exceed the
2808          * render engine limits, so not much point in remapping.
2809          * Would also need to deal with the fence POT alignment
2810          * and gen2 2KiB GTT tile size.
2811          */
2812         if (INTEL_GEN(dev_priv) < 4)
2813                 return false;
2814
2815         /*
2816          * The new CCS hash mode isn't compatible with remapping as
2817          * the virtual address of the pages affects the compressed data.
2818          */
2819         if (is_ccs_modifier(fb->modifier))
2820                 return false;
2821
2822         /* Linear needs a page aligned stride for remapping */
2823         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2824                 unsigned int alignment = intel_tile_size(dev_priv) - 1;
2825
2826                 for (i = 0; i < fb->format->num_planes; i++) {
2827                         if (fb->pitches[i] & alignment)
2828                                 return false;
2829                 }
2830         }
2831
2832         return true;
2833 }
2834
2835 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2836 {
2837         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2838         const struct drm_framebuffer *fb = plane_state->hw.fb;
2839         unsigned int rotation = plane_state->hw.rotation;
2840         u32 stride, max_stride;
2841
2842         /*
2843          * No remapping for invisible planes since we don't have
2844          * an actual source viewport to remap.
2845          */
2846         if (!plane_state->uapi.visible)
2847                 return false;
2848
2849         if (!intel_plane_can_remap(plane_state))
2850                 return false;
2851
2852         /*
2853          * FIXME: aux plane limits on gen9+ are
2854          * unclear in Bspec, for now no checking.
2855          */
2856         stride = intel_fb_pitch(fb, 0, rotation);
2857         max_stride = plane->max_stride(plane, fb->format->format,
2858                                        fb->modifier, rotation);
2859
2860         return stride > max_stride;
2861 }
2862
/*
 * Return the horizontal/vertical subsampling factors of @color_plane
 * relative to the main surface. Plane 0 is by definition unsubsampled;
 * non-CCS aux planes use the format's hsub/vsub; gen12 CCS planes
 * derive theirs from the char block layout.
 */
static void
intel_fb_plane_get_subsampling(int *hsub, int *vsub,
			       const struct drm_framebuffer *fb,
			       int color_plane)
{
	int main_plane;

	if (color_plane == 0) {
		*hsub = 1;
		*vsub = 1;

		return;
	}

	/*
	 * TODO: Deduct the subsampling from the char block for all CCS
	 * formats and planes.
	 */
	if (!is_gen12_ccs_plane(fb, color_plane)) {
		*hsub = fb->format->hsub;
		*vsub = fb->format->vsub;

		return;
	}

	main_plane = ccs_to_main_plane(fb, color_plane);
	*hsub = drm_format_info_block_width(fb->format, color_plane) /
		drm_format_info_block_width(fb->format, main_plane);

	/*
	 * The min stride check in the core framebuffer_check() function
	 * assumes that format->hsub applies to every plane except for the
	 * first plane. That's incorrect for the CCS AUX plane of the first
	 * plane, but for the above check to pass we must define the block
	 * width with that subsampling applied to it. Adjust the width here
	 * accordingly, so we can calculate the actual subsampling factor.
	 */
	if (main_plane == 0)
		*hsub *= fb->format->hsub;

	/* Gen12: one CCS byte covers 32 main surface lines (see above). */
	*vsub = 32;
}
2905 static int
2906 intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
2907 {
2908         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2909         int main_plane;
2910         int hsub, vsub;
2911         int tile_width, tile_height;
2912         int ccs_x, ccs_y;
2913         int main_x, main_y;
2914
2915         if (!is_ccs_plane(fb, ccs_plane))
2916                 return 0;
2917
2918         intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
2919         intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
2920
2921         tile_width *= hsub;
2922         tile_height *= vsub;
2923
2924         ccs_x = (x * hsub) % tile_width;
2925         ccs_y = (y * vsub) % tile_height;
2926
2927         main_plane = ccs_to_main_plane(fb, ccs_plane);
2928         main_x = intel_fb->normal[main_plane].x % tile_width;
2929         main_y = intel_fb->normal[main_plane].y % tile_height;
2930
2931         /*
2932          * CCS doesn't have its own x/y offset register, so the intra CCS tile
2933          * x/y offsets must match between CCS and the main surface.
2934          */
2935         if (main_x != ccs_x || main_y != ccs_y) {
2936                 DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
2937                               main_x, main_y,
2938                               ccs_x, ccs_y,
2939                               intel_fb->normal[main_plane].x,
2940                               intel_fb->normal[main_plane].y,
2941                               x, y);
2942                 return -EINVAL;
2943         }
2944
2945         return 0;
2946 }
2947
2948 static void
2949 intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
2950 {
2951         int main_plane = is_ccs_plane(fb, color_plane) ?
2952                          ccs_to_main_plane(fb, color_plane) : 0;
2953         int main_hsub, main_vsub;
2954         int hsub, vsub;
2955
2956         intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
2957         intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
2958         *w = fb->width / main_hsub / hsub;
2959         *h = fb->height / main_vsub / vsub;
2960 }
2961
2962 /*
2963  * Setup the rotated view for an FB plane and return the size the GTT mapping
2964  * requires for this view.
2965  */
static u32
setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
		  u32 gtt_offset_rotated, int x, int y,
		  unsigned int width, unsigned int height,
		  unsigned int tile_size,
		  unsigned int tile_width, unsigned int tile_height,
		  struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	unsigned int pitch_tiles;
	struct drm_rect r;

	/* Y or Yf modifiers required for 90/270 rotation */
	if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
	    fb->modifier != I915_FORMAT_MOD_Yf_TILED)
		return 0;

	if (WARN_ON(plane >= ARRAY_SIZE(rot_info->plane)))
		return 0;

	rot_info->plane[plane] = *plane_info;

	/* Rotated pitch is the tile aligned fb height (in bytes). */
	intel_fb->rotated[plane].pitch = plane_info->height * tile_height;

	/* rotate the x/y offsets to match the GTT view */
	drm_rect_init(&r, x, y, width, height);
	drm_rect_rotate(&r,
			plane_info->width * tile_width,
			plane_info->height * tile_height,
			DRM_MODE_ROTATE_270);
	x = r.x1;
	y = r.y1;

	/* rotate the tile dimensions to match the GTT view */
	pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
	swap(tile_width, tile_height);

	/*
	 * We only keep the x/y offsets, so push all of the
	 * gtt offset into the x/y offsets.
	 */
	intel_adjust_tile_offset(&x, &y,
				 tile_width, tile_height,
				 tile_size, pitch_tiles,
				 gtt_offset_rotated * tile_size, 0);

	/*
	 * First pixel of the framebuffer from
	 * the start of the rotated gtt mapping.
	 */
	intel_fb->rotated[plane].x = x;
	intel_fb->rotated[plane].y = y;

	/* Size of this plane's rotated view, in tiles. */
	return plane_info->width * plane_info->height;
}
3022
3023 static int
3024 intel_fill_fb_info(struct drm_i915_private *dev_priv,
3025                    struct drm_framebuffer *fb)
3026 {
3027         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
3028         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
3029         u32 gtt_offset_rotated = 0;
3030         unsigned int max_size = 0;
3031         int i, num_planes = fb->format->num_planes;
3032         unsigned int tile_size = intel_tile_size(dev_priv);
3033
3034         for (i = 0; i < num_planes; i++) {
3035                 unsigned int width, height;
3036                 unsigned int cpp, size;
3037                 u32 offset;
3038                 int x, y;
3039                 int ret;
3040
3041                 cpp = fb->format->cpp[i];
3042                 intel_fb_plane_dims(&width, &height, fb, i);
3043
3044                 ret = intel_fb_offset_to_xy(&x, &y, fb, i);
3045                 if (ret) {
3046                         drm_dbg_kms(&dev_priv->drm,
3047                                     "bad fb plane %d offset: 0x%x\n",
3048                                     i, fb->offsets[i]);
3049                         return ret;
3050                 }
3051
3052                 ret = intel_fb_check_ccs_xy(fb, i, x, y);
3053                 if (ret)
3054                         return ret;
3055
3056                 /*
3057                  * The fence (if used) is aligned to the start of the object
3058                  * so having the framebuffer wrap around across the edge of the
3059                  * fenced region doesn't really work. We have no API to configure
3060                  * the fence start offset within the object (nor could we probably
3061                  * on gen2/3). So it's just easier if we just require that the
3062                  * fb layout agrees with the fence layout. We already check that the
3063                  * fb stride matches the fence stride elsewhere.
3064                  */
3065                 if (i == 0 && i915_gem_object_is_tiled(obj) &&
3066                     (x + width) * cpp > fb->pitches[i]) {
3067                         drm_dbg_kms(&dev_priv->drm,
3068                                     "bad fb plane %d offset: 0x%x\n",
3069                                      i, fb->offsets[i]);
3070                         return -EINVAL;
3071                 }
3072
3073                 /*
3074                  * First pixel of the framebuffer from
3075                  * the start of the normal gtt mapping.
3076                  */
3077                 intel_fb->normal[i].x = x;
3078                 intel_fb->normal[i].y = y;
3079
3080                 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
3081                                                       fb->pitches[i],
3082                                                       DRM_MODE_ROTATE_0,
3083                                                       tile_size);
3084                 offset /= tile_size;
3085
3086                 if (!is_surface_linear(fb, i)) {
3087                         struct intel_remapped_plane_info plane_info;
3088                         unsigned int tile_width, tile_height;
3089
3090                         intel_tile_dims(fb, i, &tile_width, &tile_height);
3091
3092                         plane_info.offset = offset;
3093                         plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
3094                                                          tile_width * cpp);
3095                         plane_info.width = DIV_ROUND_UP(x + width, tile_width);
3096                         plane_info.height = DIV_ROUND_UP(y + height,
3097                                                          tile_height);
3098
3099                         /* how many tiles does this plane need */
3100                         size = plane_info.stride * plane_info.height;
3101                         /*
3102                          * If the plane isn't horizontally tile aligned,
3103                          * we need one more tile.
3104                          */
3105                         if (x != 0)
3106                                 size++;
3107
3108                         gtt_offset_rotated +=
3109                                 setup_fb_rotation(i, &plane_info,
3110                                                   gtt_offset_rotated,
3111                                                   x, y, width, height,
3112                                                   tile_size,
3113                                                   tile_width, tile_height,
3114                                                   fb);
3115                 } else {
3116                         size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
3117                                             x * cpp, tile_size);
3118                 }
3119
3120                 /* how many tiles in total needed in the bo */
3121                 max_size = max(max_size, offset + size);
3122         }
3123
3124         if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
3125                 drm_dbg_kms(&dev_priv->drm,
3126                             "fb too big for bo (need %llu bytes, have %zu bytes)\n",
3127                             mul_u32_u32(max_size, tile_size), obj->base.size);
3128                 return -EINVAL;
3129         }
3130
3131         return 0;
3132 }
3133
/*
 * Build a remapped/rotated GGTT view for the plane and rewrite the
 * plane's color_plane[] x/y/stride to address that view instead of the
 * normal GTT mapping.
 *
 * plane_state->view.type becomes I915_GGTT_VIEW_ROTATED for 90/270
 * degree rotation and I915_GGTT_VIEW_REMAPPED otherwise. Each color
 * plane of the view covers just the tiles spanning the src viewport,
 * so plane_state->uapi.src is also translated to be viewport relative
 * (and rotated for 90/270).
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *info = &plane_state->view.rotated;
	unsigned int rotation = plane_state->hw.rotation;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);
	unsigned int src_x, src_y;
	unsigned int src_w, src_h;
	u32 gtt_offset = 0; /* running tile offset into the remapped view */

	memset(&plane_state->view, 0, sizeof(plane_state->view));
	plane_state->view.type = drm_rotation_90_or_270(rotation) ?
		I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

	/* Remapping is never used for CCS framebuffers. */
	WARN_ON(is_ccs_modifier(fb->modifier));

	/* Make src coordinates relative to the viewport */
	drm_rect_translate(&plane_state->uapi.src,
			   -(src_x << 16), -(src_y << 16));

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->uapi.src,
				src_w << 16, src_h << 16,
				DRM_MODE_ROTATE_270);

	for (i = 0; i < num_planes; i++) {
		/* color plane 0 is never subsampled */
		unsigned int hsub = i ? fb->format->hsub : 1;
		unsigned int vsub = i ? fb->format->vsub : 1;
		unsigned int cpp = fb->format->cpp[i];
		unsigned int tile_width, tile_height;
		unsigned int width, height;
		unsigned int pitch_tiles;
		unsigned int x, y;
		u32 offset;

		intel_tile_dims(fb, i, &tile_width, &tile_height);

		x = src_x / hsub;
		y = src_y / vsub;
		width = src_w / hsub;
		height = src_h / vsub;

		/*
		 * First pixel of the src viewport from the
		 * start of the normal gtt mapping.
		 */
		x += intel_fb->normal[i].x;
		y += intel_fb->normal[i].y;

		offset = intel_compute_aligned_offset(dev_priv, &x, &y,
						      fb, i, fb->pitches[i],
						      DRM_MODE_ROTATE_0, tile_size);
		/* convert to whole tiles */
		offset /= tile_size;

		WARN_ON(i >= ARRAY_SIZE(info->plane));
		info->plane[i].offset = offset;
		/* stride of the fb plane in tiles */
		info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
						     tile_width * cpp);
		info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
		info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			struct drm_rect r;

			/* rotate the x/y offsets to match the GTT view */
			drm_rect_init(&r, x, y, width, height);
			drm_rect_rotate(&r,
					info->plane[i].width * tile_width,
					info->plane[i].height * tile_height,
					DRM_MODE_ROTATE_270);
			x = r.x1;
			y = r.y1;

			/* rotated view: pitch derives from the height in tiles */
			pitch_tiles = info->plane[i].height;
			plane_state->color_plane[i].stride = pitch_tiles * tile_height;

			/* rotate the tile dimensions to match the GTT view */
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = info->plane[i].width;
			plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
		}

		/*
		 * We only keep the x/y offsets, so push all of the
		 * gtt offset into the x/y offsets.
		 */
		intel_adjust_tile_offset(&x, &y,
					 tile_width, tile_height,
					 tile_size, pitch_tiles,
					 gtt_offset * tile_size, 0);

		/* next color plane follows immediately in the remapped view */
		gtt_offset += info->plane[i].width * info->plane[i].height;

		plane_state->color_plane[i].offset = 0;
		plane_state->color_plane[i].x = x;
		plane_state->color_plane[i].y = y;
	}
}
3244
3245 static int
3246 intel_plane_compute_gtt(struct intel_plane_state *plane_state)
3247 {
3248         const struct intel_framebuffer *fb =
3249                 to_intel_framebuffer(plane_state->hw.fb);
3250         unsigned int rotation = plane_state->hw.rotation;
3251         int i, num_planes;
3252
3253         if (!fb)
3254                 return 0;
3255
3256         num_planes = fb->base.format->num_planes;
3257
3258         if (intel_plane_needs_remap(plane_state)) {
3259                 intel_plane_remap_gtt(plane_state);
3260
3261                 /*
3262                  * Sometimes even remapping can't overcome
3263                  * the stride limitations :( Can happen with
3264                  * big plane sizes and suitably misaligned
3265                  * offsets.
3266                  */
3267                 return intel_plane_check_stride(plane_state);
3268         }
3269
3270         intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
3271
3272         for (i = 0; i < num_planes; i++) {
3273                 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
3274                 plane_state->color_plane[i].offset = 0;
3275
3276                 if (drm_rotation_90_or_270(rotation)) {
3277                         plane_state->color_plane[i].x = fb->rotated[i].x;
3278                         plane_state->color_plane[i].y = fb->rotated[i].y;
3279                 } else {
3280                         plane_state->color_plane[i].x = fb->normal[i].x;
3281                         plane_state->color_plane[i].y = fb->normal[i].y;
3282                 }
3283         }
3284
3285         /* Rotate src coordinates to match rotated GTT view */
3286         if (drm_rotation_90_or_270(rotation))
3287                 drm_rect_rotate(&plane_state->uapi.src,
3288                                 fb->base.width << 16, fb->base.height << 16,
3289                                 DRM_MODE_ROTATE_270);
3290
3291         return intel_plane_check_stride(plane_state);
3292 }
3293
3294 static int i9xx_format_to_fourcc(int format)
3295 {
3296         switch (format) {
3297         case DISPPLANE_8BPP:
3298                 return DRM_FORMAT_C8;
3299         case DISPPLANE_BGRA555:
3300                 return DRM_FORMAT_ARGB1555;
3301         case DISPPLANE_BGRX555:
3302                 return DRM_FORMAT_XRGB1555;
3303         case DISPPLANE_BGRX565:
3304                 return DRM_FORMAT_RGB565;
3305         default:
3306         case DISPPLANE_BGRX888:
3307                 return DRM_FORMAT_XRGB8888;
3308         case DISPPLANE_RGBX888:
3309                 return DRM_FORMAT_XBGR8888;
3310         case DISPPLANE_BGRA888:
3311                 return DRM_FORMAT_ARGB8888;
3312         case DISPPLANE_RGBA888:
3313                 return DRM_FORMAT_ABGR8888;
3314         case DISPPLANE_BGRX101010:
3315                 return DRM_FORMAT_XRGB2101010;
3316         case DISPPLANE_RGBX101010:
3317                 return DRM_FORMAT_XBGR2101010;
3318         case DISPPLANE_BGRA101010:
3319                 return DRM_FORMAT_ARGB2101010;
3320         case DISPPLANE_RGBA101010:
3321                 return DRM_FORMAT_ABGR2101010;
3322         case DISPPLANE_RGBX161616:
3323                 return DRM_FORMAT_XBGR16161616F;
3324         }
3325 }
3326
3327 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
3328 {
3329         switch (format) {
3330         case PLANE_CTL_FORMAT_RGB_565:
3331                 return DRM_FORMAT_RGB565;
3332         case PLANE_CTL_FORMAT_NV12:
3333                 return DRM_FORMAT_NV12;
3334         case PLANE_CTL_FORMAT_P010:
3335                 return DRM_FORMAT_P010;
3336         case PLANE_CTL_FORMAT_P012:
3337                 return DRM_FORMAT_P012;
3338         case PLANE_CTL_FORMAT_P016:
3339                 return DRM_FORMAT_P016;
3340         case PLANE_CTL_FORMAT_Y210:
3341                 return DRM_FORMAT_Y210;
3342         case PLANE_CTL_FORMAT_Y212:
3343                 return DRM_FORMAT_Y212;
3344         case PLANE_CTL_FORMAT_Y216:
3345                 return DRM_FORMAT_Y216;
3346         case PLANE_CTL_FORMAT_Y410:
3347                 return DRM_FORMAT_XVYU2101010;
3348         case PLANE_CTL_FORMAT_Y412:
3349                 return DRM_FORMAT_XVYU12_16161616;
3350         case PLANE_CTL_FORMAT_Y416:
3351                 return DRM_FORMAT_XVYU16161616;
3352         default:
3353         case PLANE_CTL_FORMAT_XRGB_8888:
3354                 if (rgb_order) {
3355                         if (alpha)
3356                                 return DRM_FORMAT_ABGR8888;
3357                         else
3358                                 return DRM_FORMAT_XBGR8888;
3359                 } else {
3360                         if (alpha)
3361                                 return DRM_FORMAT_ARGB8888;
3362                         else
3363                                 return DRM_FORMAT_XRGB8888;
3364                 }
3365         case PLANE_CTL_FORMAT_XRGB_2101010:
3366                 if (rgb_order) {
3367                         if (alpha)
3368                                 return DRM_FORMAT_ABGR2101010;
3369                         else
3370                                 return DRM_FORMAT_XBGR2101010;
3371                 } else {
3372                         if (alpha)
3373                                 return DRM_FORMAT_ARGB2101010;
3374                         else
3375                                 return DRM_FORMAT_XRGB2101010;
3376                 }
3377         case PLANE_CTL_FORMAT_XRGB_16161616F:
3378                 if (rgb_order) {
3379                         if (alpha)
3380                                 return DRM_FORMAT_ABGR16161616F;
3381                         else
3382                                 return DRM_FORMAT_XBGR16161616F;
3383                 } else {
3384                         if (alpha)
3385                                 return DRM_FORMAT_ARGB16161616F;
3386                         else
3387                                 return DRM_FORMAT_XRGB16161616F;
3388                 }
3389         }
3390 }
3391
/*
 * Wrap the preallocated (firmware-programmed) framebuffer range in
 * stolen memory in a GEM object and pin it into the GGTT at the exact
 * address the plane is already scanning out from.
 *
 * Returns the pinned vma, or NULL on any failure (in which case the
 * initial fb is simply not taken over).
 */
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
		  struct intel_initial_plane_config *plane_config)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 base, size;

	if (plane_config->size == 0)
		return NULL;

	/* Expand the fb range to GTT alignment on both ends. */
	base = round_down(plane_config->base,
			  I915_GTT_MIN_ALIGNMENT);
	size = round_up(plane_config->base + plane_config->size,
			I915_GTT_MIN_ALIGNMENT);
	size -= base;

	/*
	 * If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features.
	 */
	if (size * 2 > i915->stolen_usable_size)
		return NULL;

	obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
	if (IS_ERR(obj))
		return NULL;

	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		break;
	case I915_TILING_X:
	case I915_TILING_Y:
		/* Mirror the pre-programmed tiling/stride onto the object. */
		obj->tiling_and_stride =
			plane_config->fb->base.pitches[0] |
			plane_config->tiling;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto err_obj;
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma))
		goto err_obj;

	/* Pin at the fixed GGTT offset already in use by the plane. */
	if (i915_ggtt_pin(vma, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
		goto err_obj;

	/* Reject tiled objects that didn't land map-and-fenceable. */
	if (i915_gem_object_is_tiled(obj) &&
	    !i915_vma_is_map_and_fenceable(vma))
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return NULL;
}
3452
3453 static bool
3454 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
3455                               struct intel_initial_plane_config *plane_config)
3456 {
3457         struct drm_device *dev = crtc->base.dev;
3458         struct drm_i915_private *dev_priv = to_i915(dev);
3459         struct drm_mode_fb_cmd2 mode_cmd = { 0 };
3460         struct drm_framebuffer *fb = &plane_config->fb->base;
3461         struct i915_vma *vma;
3462
3463         switch (fb->modifier) {
3464         case DRM_FORMAT_MOD_LINEAR:
3465         case I915_FORMAT_MOD_X_TILED:
3466         case I915_FORMAT_MOD_Y_TILED:
3467                 break;
3468         default:
3469                 drm_dbg(&dev_priv->drm,
3470                         "Unsupported modifier for initial FB: 0x%llx\n",
3471                         fb->modifier);
3472                 return false;
3473         }
3474
3475         vma = initial_plane_vma(dev_priv, plane_config);
3476         if (!vma)
3477                 return false;
3478
3479         mode_cmd.pixel_format = fb->format->format;
3480         mode_cmd.width = fb->width;
3481         mode_cmd.height = fb->height;
3482         mode_cmd.pitches[0] = fb->pitches[0];
3483         mode_cmd.modifier[0] = fb->modifier;
3484         mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
3485
3486         if (intel_framebuffer_init(to_intel_framebuffer(fb),
3487                                    vma->obj, &mode_cmd)) {
3488                 drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
3489                 goto err_vma;
3490         }
3491
3492         plane_config->vma = vma;
3493         return true;
3494
3495 err_vma:
3496         i915_vma_put(vma);
3497         return false;
3498 }
3499
3500 static void
3501 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3502                         struct intel_plane_state *plane_state,
3503                         bool visible)
3504 {
3505         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
3506
3507         plane_state->uapi.visible = visible;
3508
3509         if (visible)
3510                 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
3511         else
3512                 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
3513 }
3514
3515 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
3516 {
3517         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3518         struct drm_plane *plane;
3519
3520         /*
3521          * Active_planes aliases if multiple "primary" or cursor planes
3522          * have been used on the same (or wrong) pipe. plane_mask uses
3523          * unique ids, hence we can use that to reconstruct active_planes.
3524          */
3525         crtc_state->active_planes = 0;
3526
3527         drm_for_each_plane_mask(plane, &dev_priv->drm,
3528                                 crtc_state->uapi.plane_mask)
3529                 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
3530 }
3531
/*
 * Disable @plane on @crtc outside of the atomic commit machinery and
 * fix up the current software state (visibility, active_planes, data
 * rate, min cdclk) to match. Used to sanitize inherited hardware state
 * (see intel_find_initial_plane_obj()).
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_active_planes(crtc_state);
	/* A disabled plane contributes no bandwidth or cdclk demand. */
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	/* IPS is turned off along with the primary plane. */
	if (plane->id == PLANE_PRIMARY)
		hsw_disable_ips(crtc_state);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_disable_plane(plane, crtc_state);
}
3576
3577 static struct intel_frontbuffer *
3578 to_intel_frontbuffer(struct drm_framebuffer *fb)
3579 {
3580         return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
3581 }
3582
/*
 * Take over the framebuffer the firmware left enabled on this crtc's
 * primary plane: first try to wrap its memory in a fresh GEM object,
 * then fall back to sharing an fb another crtc already reconstructed
 * at the same GGTT address. If both fail, disable the primary plane so
 * software state matches reality.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;
	struct i915_vma *vma;

	/* Nothing to take over if the plane had no fb configured. */
	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		vma = plane_config->vma;
		goto valid_fb;
	}

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		/* Same scanout address -> same underlying memory. */
		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->hw.fb;
			vma = state->vma;
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	/* Hook the fb/vma up to the primary plane's current state. */
	intel_state->hw.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->hw.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->hw.rotation);

	/* Keep the vma pinned and referenced for this plane state. */
	__i915_vma_pin(vma);
	intel_state->vma = i915_vma_get(vma);
	if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
		if (vma->fence)
			intel_state->flags |= PLANE_HAS_FENCE;

	/* Full-fb src/dst coordinates (16.16 fixed point for src). */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->uapi.src = drm_plane_state_src(plane_state);
	intel_state->uapi.dst = drm_plane_state_dest(plane_state);

	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	drm_framebuffer_get(fb);

	plane_state->crtc = &intel_crtc->base;
	intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);

	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	/* Mark the plane's frontbuffer bit as busy for tracking. */
	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &to_intel_frontbuffer(fb)->bits);
}
3682
3683 static int skl_max_plane_width(const struct drm_framebuffer *fb,
3684                                int color_plane,
3685                                unsigned int rotation)
3686 {
3687         int cpp = fb->format->cpp[color_plane];
3688
3689         switch (fb->modifier) {
3690         case DRM_FORMAT_MOD_LINEAR:
3691         case I915_FORMAT_MOD_X_TILED:
3692                 /*
3693                  * Validated limit is 4k, but has 5k should
3694                  * work apart from the following features:
3695                  * - Ytile (already limited to 4k)
3696                  * - FP16 (already limited to 4k)
3697                  * - render compression (already limited to 4k)
3698                  * - KVMR sprite and cursor (don't care)
3699                  * - horizontal panning (TODO verify this)
3700                  * - pipe and plane scaling (TODO verify this)
3701                  */
3702                 if (cpp == 8)
3703                         return 4096;
3704                 else
3705                         return 5120;
3706         case I915_FORMAT_MOD_Y_TILED_CCS:
3707         case I915_FORMAT_MOD_Yf_TILED_CCS:
3708         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
3709                 /* FIXME AUX plane? */
3710         case I915_FORMAT_MOD_Y_TILED:
3711         case I915_FORMAT_MOD_Yf_TILED:
3712                 if (cpp == 8)
3713                         return 2048;
3714                 else
3715                         return 4096;
3716         default:
3717                 MISSING_CASE(fb->modifier);
3718                 return 2048;
3719         }
3720 }
3721
3722 static int glk_max_plane_width(const struct drm_framebuffer *fb,
3723                                int color_plane,
3724                                unsigned int rotation)
3725 {
3726         int cpp = fb->format->cpp[color_plane];
3727
3728         switch (fb->modifier) {
3729         case DRM_FORMAT_MOD_LINEAR:
3730         case I915_FORMAT_MOD_X_TILED:
3731                 if (cpp == 8)
3732                         return 4096;
3733                 else
3734                         return 5120;
3735         case I915_FORMAT_MOD_Y_TILED_CCS:
3736         case I915_FORMAT_MOD_Yf_TILED_CCS:
3737                 /* FIXME AUX plane? */
3738         case I915_FORMAT_MOD_Y_TILED:
3739         case I915_FORMAT_MOD_Yf_TILED:
3740                 if (cpp == 8)
3741                         return 2048;
3742                 else
3743                         return 5120;
3744         default:
3745                 MISSING_CASE(fb->modifier);
3746                 return 2048;
3747         }
3748 }
3749
/* Maximum plane source width on gen11+ (modifier independent). */
static int icl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	return 5120;
}
3756
/* Maximum plane source height on pre-gen11 platforms. */
static int skl_max_plane_height(void)
{
	return 4096;
}
3761
/* Maximum plane source height on gen11+. */
static int icl_max_plane_height(void)
{
	return 4320;
}
3766
/*
 * Try to make the CCS (AUX) surface's x/y coincide with the main
 * surface's x/y by stepping the AUX surface offset backwards, one
 * alignment unit at a time.
 *
 * Returns false if no matching position could be found; on success the
 * ccs_plane's offset/x/y in plane_state are updated in place and true
 * is returned.
 */
static bool
skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
			       int main_x, int main_y, u32 main_offset,
			       int ccs_plane)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int aux_x = plane_state->color_plane[ccs_plane].x;
	int aux_y = plane_state->color_plane[ccs_plane].y;
	u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
	u32 alignment = intel_surf_alignment(fb, ccs_plane);
	int hsub;
	int vsub;

	intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
	/* Walk the AUX offset down while it can still reach main x/y. */
	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		if (aux_x == main_x && aux_y == main_y)
			break;

		if (aux_offset == 0)
			break;

		/* Convert to the CCS plane's subsampled coordinate space. */
		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
							       plane_state,
							       ccs_plane,
							       aux_offset,
							       aux_offset -
								alignment);
		/* Back to full-resolution coords, keeping the remainder. */
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[ccs_plane].offset = aux_offset;
	plane_state->color_plane[ccs_plane].x = aux_x;
	plane_state->color_plane[ccs_plane].y = aux_y;

	return true;
}
3811
3812 static int skl_check_main_surface(struct intel_plane_state *plane_state)
3813 {
3814         struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
3815         const struct drm_framebuffer *fb = plane_state->hw.fb;
3816         unsigned int rotation = plane_state->hw.rotation;
3817         int x = plane_state->uapi.src.x1 >> 16;
3818         int y = plane_state->uapi.src.y1 >> 16;
3819         int w = drm_rect_width(&plane_state->uapi.src) >> 16;
3820         int h = drm_rect_height(&plane_state->uapi.src) >> 16;
3821         int max_width;
3822         int max_height;
3823         u32 alignment;
3824         u32 offset;
3825         int aux_plane = intel_main_to_aux_plane(fb, 0);
3826         u32 aux_offset = plane_state->color_plane[aux_plane].offset;
3827
3828         if (INTEL_GEN(dev_priv) >= 11)
3829                 max_width = icl_max_plane_width(fb, 0, rotation);
3830         else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
3831                 max_width = glk_max_plane_width(fb, 0, rotation);
3832         else
3833                 max_width = skl_max_plane_width(fb, 0, rotation);
3834
3835         if (INTEL_GEN(dev_priv) >= 11)
3836                 max_height = icl_max_plane_height();
3837         else
3838                 max_height = skl_max_plane_height();
3839
3840         if (w > max_width || h > max_height) {
3841                 drm_dbg_kms(&dev_priv->drm,
3842                             "requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
3843                             w, h, max_width, max_height);
3844                 return -EINVAL;
3845         }
3846
3847         intel_add_fb_offsets(&x, &y, plane_state, 0);
3848         offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
3849         alignment = intel_surf_alignment(fb, 0);
3850         if (WARN_ON(alignment && !is_power_of_2(alignment)))
3851                 return -EINVAL;
3852
3853         /*
3854          * AUX surface offset is specified as the distance from the
3855          * main surface offset, and it must be non-negative. Make
3856          * sure that is what we will get.
3857          */
3858         if (offset > aux_offset)
3859                 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3860                                                            offset, aux_offset & ~(alignment - 1));
3861
3862         /*
3863          * When using an X-tiled surface, the plane blows up
3864          * if the x offset + width exceed the stride.
3865          *
3866          * TODO: linear and Y-tiled seem fine, Yf untested,
3867          */
3868         if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
3869                 int cpp = fb->format->cpp[0];
3870
3871                 while ((x + w) * cpp > plane_state->color_plane[0].stride) {
3872                         if (offset == 0) {
3873                                 drm_dbg_kms(&dev_priv->drm,
3874                                             "Unable to find suitable display surface offset due to X-tiling\n");
3875                                 return -EINVAL;
3876                         }
3877
3878                         offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3879                                                                    offset, offset - alignment);
3880                 }
3881         }
3882
3883         /*
3884          * CCS AUX surface doesn't have its own x/y offsets, we must make sure
3885          * they match with the main surface x/y offsets.
3886          */
3887         if (is_ccs_modifier(fb->modifier)) {
3888                 while (!skl_check_main_ccs_coordinates(plane_state, x, y,
3889                                                        offset, aux_plane)) {
3890                         if (offset == 0)
3891                                 break;
3892
3893                         offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3894                                                                    offset, offset - alignment);
3895                 }
3896
3897                 if (x != plane_state->color_plane[aux_plane].x ||
3898                     y != plane_state->color_plane[aux_plane].y) {
3899                         drm_dbg_kms(&dev_priv->drm,
3900                                     "Unable to find suitable display surface offset due to CCS\n");
3901                         return -EINVAL;
3902                 }
3903         }
3904
3905         plane_state->color_plane[0].offset = offset;
3906         plane_state->color_plane[0].x = x;
3907         plane_state->color_plane[0].y = y;
3908
3909         /*
3910          * Put the final coordinates back so that the src
3911          * coordinate checks will see the right values.
3912          */
3913         drm_rect_translate_to(&plane_state->uapi.src,
3914                               x << 16, y << 16);
3915
3916         return 0;
3917 }
3918
/*
 * Compute the surface offset and x/y coordinates of the chroma (CbCr)
 * plane of a YUV semiplanar (NV12/P01x) framebuffer, keeping them
 * consistent with the corresponding CCS plane when the fb is
 * compressed.
 *
 * Returns 0 on success, -EINVAL if the chroma source size exceeds the
 * hardware limits or no CCS-compatible offset can be found.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	int uv_plane = 1;
	int max_width = skl_max_plane_width(fb, uv_plane, rotation);
	int max_height = 4096;
	/*
	 * src coordinates are 16.16 fixed point; the extra bit of
	 * shift (>> 17 rather than >> 16) accounts for the 2x2 chroma
	 * subsampling of the UV plane.
	 */
	int x = plane_state->uapi.src.x1 >> 17;
	int y = plane_state->uapi.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->uapi.src) >> 17;
	int h = drm_rect_height(&plane_state->uapi.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, uv_plane);
	offset = intel_plane_compute_aligned_offset(&x, &y,
						    plane_state, uv_plane);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		drm_dbg_kms(&i915->drm,
			    "CbCr source size %dx%d too big (limit %dx%d)\n",
			    w, h, max_width, max_height);
		return -EINVAL;
	}

	if (is_ccs_modifier(fb->modifier)) {
		int ccs_plane = main_to_ccs_plane(fb, uv_plane);
		int aux_offset = plane_state->color_plane[ccs_plane].offset;
		int alignment = intel_surf_alignment(fb, uv_plane);

		/*
		 * The CCS surface offset is expressed as a distance
		 * from the UV surface offset, so the latter must not
		 * exceed the former (mirrors skl_check_main_surface()).
		 */
		if (offset > aux_offset)
			offset = intel_plane_adjust_aligned_offset(&x, &y,
								   plane_state,
								   uv_plane,
								   offset,
								   aux_offset & ~(alignment - 1));

		/*
		 * Walk the UV offset down one alignment step at a time
		 * until its x/y coordinates line up with the CCS plane's.
		 */
		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
						       offset, ccs_plane)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y,
								   plane_state,
								   uv_plane,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[ccs_plane].x ||
		    y != plane_state->color_plane[ccs_plane].y) {
			drm_dbg_kms(&i915->drm,
				    "Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[uv_plane].offset = offset;
	plane_state->color_plane[uv_plane].x = x;
	plane_state->color_plane[uv_plane].y = y;

	return 0;
}
3982
/*
 * Compute the surface offset and x/y coordinates of every CCS (color
 * control surface) plane of a compressed framebuffer. The stored x/y
 * are scaled back to the corresponding main plane's units so they can
 * be compared directly against the main surface coordinates later.
 *
 * Always returns 0.
 */
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	/* src coordinates are 16.16 fixed point */
	int src_x = plane_state->uapi.src.x1 >> 16;
	int src_y = plane_state->uapi.src.y1 >> 16;
	u32 offset;
	int ccs_plane;

	for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) {
		int main_hsub, main_vsub;
		int hsub, vsub;
		int x, y;

		/* Only the CCS planes themselves are handled here */
		if (!is_ccs_plane(fb, ccs_plane))
			continue;

		intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
					       ccs_to_main_plane(fb, ccs_plane));
		intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

		/* Combined subsampling of the CCS plane wrt. plane 0 */
		hsub *= main_hsub;
		vsub *= main_vsub;
		x = src_x / hsub;
		y = src_y / vsub;

		intel_add_fb_offsets(&x, &y, plane_state, ccs_plane);

		offset = intel_plane_compute_aligned_offset(&x, &y,
							    plane_state,
							    ccs_plane);

		plane_state->color_plane[ccs_plane].offset = offset;
		/*
		 * Undo the combined subsampling and keep only the main
		 * plane's own subsampling, i.e. store x/y in units of
		 * the main plane this CCS plane compresses.
		 */
		plane_state->color_plane[ccs_plane].x = (x * hsub +
							 src_x % hsub) /
							main_hsub;
		plane_state->color_plane[ccs_plane].y = (y * vsub +
							 src_y % vsub) /
							main_vsub;
	}

	return 0;
}
4025
/*
 * Compute the final surface offsets and x/y coordinates for all color
 * planes of a skl+ plane. The AUX surfaces (CCS and/or the NV12/P01x
 * chroma plane) are set up first, since skl_check_main_surface()
 * constrains the main surface offset against them.
 *
 * Returns 0 on success or a negative error code.
 */
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int ret;
	bool needs_aux = false;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	/* Nothing to compute for an invisible plane */
	if (!plane_state->uapi.visible)
		return 0;

	/*
	 * Handle the AUX surface first since the main surface setup depends on
	 * it.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		needs_aux = true;
		ret = skl_check_ccs_aux_surface(plane_state);
		if (ret)
			return ret;
	}

	/* YUV semiplanar formats carry a separate chroma plane */
	if (intel_format_info_is_yuv_semiplanar(fb->format,
						fb->modifier)) {
		needs_aux = true;
		ret = skl_check_nv12_aux_surface(plane_state);
		if (ret)
			return ret;
	}

	if (!needs_aux) {
		int i;

		/*
		 * Mark the unused color planes with an out-of-range
		 * sentinel offset; presumably so a stale offset left
		 * over from a previous state can never be mistaken
		 * for a valid AUX surface offset - TODO confirm.
		 */
		for (i = 1; i < fb->format->num_planes; i++) {
			plane_state->color_plane[i].offset = ~0xfff;
			plane_state->color_plane[i].x = 0;
			plane_state->color_plane[i].y = 0;
		}
	}

	ret = skl_check_main_surface(plane_state);
	if (ret)
		return ret;

	return 0;
}
4074
4075 static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
4076                              const struct intel_plane_state *plane_state,
4077                              unsigned int *num, unsigned int *den)
4078 {
4079         const struct drm_framebuffer *fb = plane_state->hw.fb;
4080         unsigned int cpp = fb->format->cpp[0];
4081
4082         /*
4083          * g4x bspec says 64bpp pixel rate can't exceed 80%
4084          * of cdclk when the sprite plane is enabled on the
4085          * same pipe. ilk/snb bspec says 64bpp pixel rate is
4086          * never allowed to exceed 80% of cdclk. Let's just go
4087          * with the ilk/snb limit always.
4088          */
4089         if (cpp == 8) {
4090                 *num = 10;
4091                 *den = 8;
4092         } else {
4093                 *num = 1;
4094                 *den = 1;
4095         }
4096 }
4097
4098 static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
4099                                 const struct intel_plane_state *plane_state)
4100 {
4101         unsigned int pixel_rate;
4102         unsigned int num, den;
4103
4104         /*
4105          * Note that crtc_state->pixel_rate accounts for both
4106          * horizontal and vertical panel fitter downscaling factors.
4107          * Pre-HSW bspec tells us to only consider the horizontal
4108          * downscaling factor here. We ignore that and just consider
4109          * both for simplicity.
4110          */
4111         pixel_rate = crtc_state->pixel_rate;
4112
4113         i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
4114
4115         /* two pixels per clock with double wide pipe */
4116         if (crtc_state->double_wide)
4117                 den *= 2;
4118
4119         return DIV_ROUND_UP(pixel_rate * num, den);
4120 }
4121
4122 unsigned int
4123 i9xx_plane_max_stride(struct intel_plane *plane,
4124                       u32 pixel_format, u64 modifier,
4125                       unsigned int rotation)
4126 {
4127         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4128
4129         if (!HAS_GMCH(dev_priv)) {
4130                 return 32*1024;
4131         } else if (INTEL_GEN(dev_priv) >= 4) {
4132                 if (modifier == I915_FORMAT_MOD_X_TILED)
4133                         return 16*1024;
4134                 else
4135                         return 32*1024;
4136         } else if (INTEL_GEN(dev_priv) >= 3) {
4137                 if (modifier == I915_FORMAT_MOD_X_TILED)
4138                         return 8*1024;
4139                 else
4140                         return 16*1024;
4141         } else {
4142                 if (plane->i9xx_plane == PLANE_C)
4143                         return 4*1024;
4144                 else
4145                         return 8*1024;
4146         }
4147 }
4148
4149 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4150 {
4151         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4152         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4153         u32 dspcntr = 0;
4154
4155         if (crtc_state->gamma_enable)
4156                 dspcntr |= DISPPLANE_GAMMA_ENABLE;
4157
4158         if (crtc_state->csc_enable)
4159                 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
4160
4161         if (INTEL_GEN(dev_priv) < 5)
4162                 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
4163
4164         return dspcntr;
4165 }
4166
/*
 * Build the plane-dependent part of the DSPCNTR value for a pre-skl
 * primary plane: enable bit, trickle feed, pixel format, tiling and
 * rotation. The crtc-dependent bits are added separately by
 * i9xx_plane_ctl_crtc().
 *
 * Returns 0 (plane left disabled) for an unsupported pixel format.
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE;

	/* Trickle feed is disabled on these platforms only */
	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	/* Translate the fourcc into the hardware format bits */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_ARGB1555:
		dspcntr |= DISPPLANE_BGRA555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRA888;
		break;
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBA888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRA101010;
		break;
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBA101010;
		break;
	case DRM_FORMAT_XBGR16161616F:
		dspcntr |= DISPPLANE_RGBX161616;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}
4239
/*
 * Compute the surface offset and x/y coordinates for a pre-skl
 * primary plane and store them in plane_state->color_plane[0].
 *
 * Returns 0 on success, -EINVAL if the fb violates hardware limits,
 * or whatever error intel_plane_compute_gtt() produced.
 */
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int src_x, src_y, src_w;
	u32 offset;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	/* Nothing to compute for an invisible plane */
	if (!plane_state->uapi.visible)
		return 0;

	/* src coordinates are 16.16 fixed point */
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;

	/* Undocumented hardware limit on i965/g4x/vlv/chv */
	if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
		return -EINVAL;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);

	/* Pre-gen4 has no surface offset register; everything is linear */
	if (INTEL_GEN(dev_priv) >= 4)
		offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
							    plane_state, 0);
	else
		offset = 0;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      src_x << 16, src_y << 16);

	/* HSW/BDW do this automagically in hardware */
	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
		unsigned int rotation = plane_state->hw.rotation;
		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

		/*
		 * Point at the last pixel of the (last) row, presumably
		 * because the hardware scans out backwards for these
		 * orientations - TODO confirm against bspec.
		 */
		if (rotation & DRM_MODE_ROTATE_180) {
			src_x += src_w - 1;
			src_y += src_h - 1;
		} else if (rotation & DRM_MODE_REFLECT_X) {
			src_x += src_w - 1;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
4299
4300 static bool i9xx_plane_has_windowing(struct intel_plane *plane)
4301 {
4302         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4303         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4304
4305         if (IS_CHERRYVIEW(dev_priv))
4306                 return i9xx_plane == PLANE_B;
4307         else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
4308                 return false;
4309         else if (IS_GEN(dev_priv, 4))
4310                 return i9xx_plane == PLANE_C;
4311         else
4312                 return i9xx_plane == PLANE_B ||
4313                         i9xx_plane == PLANE_C;
4314 }
4315
/*
 * Atomic check for a pre-skl primary plane: validate rotation, clip
 * the plane (no scaling supported), compute the surface offset, check
 * the src coordinates and finally precompute the DSPCNTR value.
 *
 * Returns 0 on success or a negative error code.
 */
static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
		 struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	int ret;

	ret = chv_plane_check_rotation(plane_state);
	if (ret)
		return ret;

	/* No scaling; windowing only on planes that support it */
	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
						  &crtc_state->uapi,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  i9xx_plane_has_windowing(plane),
						  true);
	if (ret)
		return ret;

	ret = i9xx_check_plane_surface(plane_state);
	if (ret)
		return ret;

	/* Invisible planes need no further validation */
	if (!plane_state->uapi.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);

	return 0;
}
4351
/*
 * Program a pre-skl primary plane from precomputed plane state.
 *
 * All register writes are done under the uncore lock with the raw
 * (_fw) accessors, presumably to keep the whole update as close to
 * atomic as possible; DSPCNTR is written last, right before the
 * surface address that arms the update.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	int crtc_x = plane_state->uapi.dst.x1;
	int crtc_y = plane_state->uapi.dst.y1;
	int crtc_w = drm_rect_width(&plane_state->uapi.dst);
	int crtc_h = drm_rect_height(&plane_state->uapi.dst);
	unsigned long irqflags;
	u32 dspaddr_offset;
	u32 dspcntr;

	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* gen4+ uses the precomputed surface offset; older gens are linear */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
			  plane_state->color_plane[0].stride);

	if (INTEL_GEN(dev_priv) < 4) {
		/*
		 * PLANE_A doesn't actually have a full window
		 * generator but let's assume we still need to
		 * program whatever is there.
		 */
		intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
				  (crtc_y << 16) | crtc_x);
		intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
				  ((crtc_h - 1) << 16) | (crtc_w - 1));
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		/* CHV pipe B primary plane has its own window generator */
		intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
				  (crtc_y << 16) | crtc_x);
		intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
				  ((crtc_h - 1) << 16) | (crtc_w - 1));
		intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
				  (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
				  linear_offset);
		intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
				  (y << 16) | x);
	}

	/*
	 * The control register self-arms if the plane was previously
	 * disabled. Try to make the plane enable atomic by writing
	 * the control register just before the surface register.
	 */
	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
				  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
	else
		intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
				  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
4426
/*
 * Disable a pre-skl primary plane, while keeping the crtc-dependent
 * DSPCNTR bits programmed (see the comment below for why).
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;
	u32 dspcntr;

	/*
	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
	 * enable on ilk+ affect the pipe bottom color as
	 * well, so we must configure them even if the plane
	 * is disabled.
	 *
	 * On pre-g4x there is no way to gamma correct the
	 * pipe bottom color but we'll keep on doing this
	 * anyway so that the crtc state readout works correctly.
	 */
	dspcntr = i9xx_plane_ctl_crtc(crtc_state);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Writing the surface address register latches the disable */
	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
	else
		intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
4457
/*
 * Read back from hardware whether a pre-skl primary plane is enabled,
 * and which pipe it is currently assigned to (via *pipe).
 *
 * Returns false (without touching *pipe) if the pipe's power domain
 * is off; the plane cannot be enabled in that case.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	/* Pre-ilk planes carry their pipe assignment in DSPCNTR */
	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
4492
4493 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
4494 {
4495         struct drm_device *dev = intel_crtc->base.dev;
4496         struct drm_i915_private *dev_priv = to_i915(dev);
4497
4498         intel_de_write(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
4499         intel_de_write(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
4500         intel_de_write(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
4501 }
4502
4503 /*
4504  * This function detaches (aka. unbinds) unused scalers in hardware
4505  */
4506 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
4507 {
4508         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
4509         const struct intel_crtc_scaler_state *scaler_state =
4510                 &crtc_state->scaler_state;
4511         int i;
4512
4513         /* loop through and disable scalers that aren't in use */
4514         for (i = 0; i < intel_crtc->num_scalers; i++) {
4515                 if (!scaler_state->scalers[i].in_use)
4516                         skl_detach_scaler(intel_crtc, i);
4517         }
4518 }
4519
/*
 * Return the unit in which the hardware expects the plane stride:
 * 64-byte chunks for linear surfaces, tiles otherwise (tile height
 * when rotated 90/270, tile width in bytes when not).
 */
static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
					  int color_plane, unsigned int rotation)
{
	if (is_surface_linear(fb, color_plane))
		return 64;

	if (drm_rotation_90_or_270(rotation))
		return intel_tile_height(fb, color_plane);

	return intel_tile_width_bytes(fb, color_plane);
}
4534
4535 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
4536                      int color_plane)
4537 {
4538         const struct drm_framebuffer *fb = plane_state->hw.fb;
4539         unsigned int rotation = plane_state->hw.rotation;
4540         u32 stride = plane_state->color_plane[color_plane].stride;
4541
4542         if (color_plane >= fb->format->num_planes)
4543                 return 0;
4544
4545         return stride / skl_plane_stride_mult(fb, color_plane, rotation);
4546 }
4547
4548 static u32 skl_plane_ctl_format(u32 pixel_format)
4549 {
4550         switch (pixel_format) {
4551         case DRM_FORMAT_C8:
4552                 return PLANE_CTL_FORMAT_INDEXED;
4553         case DRM_FORMAT_RGB565:
4554                 return PLANE_CTL_FORMAT_RGB_565;
4555         case DRM_FORMAT_XBGR8888:
4556         case DRM_FORMAT_ABGR8888:
4557                 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
4558         case DRM_FORMAT_XRGB8888:
4559         case DRM_FORMAT_ARGB8888:
4560                 return PLANE_CTL_FORMAT_XRGB_8888;
4561         case DRM_FORMAT_XBGR2101010:
4562         case DRM_FORMAT_ABGR2101010:
4563                 return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
4564         case DRM_FORMAT_XRGB2101010:
4565         case DRM_FORMAT_ARGB2101010:
4566                 return PLANE_CTL_FORMAT_XRGB_2101010;
4567         case DRM_FORMAT_XBGR16161616F:
4568         case DRM_FORMAT_ABGR16161616F:
4569                 return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
4570         case DRM_FORMAT_XRGB16161616F:
4571         case DRM_FORMAT_ARGB16161616F:
4572                 return PLANE_CTL_FORMAT_XRGB_16161616F;
4573         case DRM_FORMAT_YUYV:
4574                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
4575         case DRM_FORMAT_YVYU:
4576                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
4577         case DRM_FORMAT_UYVY:
4578                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
4579         case DRM_FORMAT_VYUY:
4580                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
4581         case DRM_FORMAT_NV12:
4582                 return PLANE_CTL_FORMAT_NV12;
4583         case DRM_FORMAT_P010:
4584                 return PLANE_CTL_FORMAT_P010;
4585         case DRM_FORMAT_P012:
4586                 return PLANE_CTL_FORMAT_P012;
4587         case DRM_FORMAT_P016:
4588                 return PLANE_CTL_FORMAT_P016;
4589         case DRM_FORMAT_Y210:
4590                 return PLANE_CTL_FORMAT_Y210;
4591         case DRM_FORMAT_Y212:
4592                 return PLANE_CTL_FORMAT_Y212;
4593         case DRM_FORMAT_Y216:
4594                 return PLANE_CTL_FORMAT_Y216;
4595         case DRM_FORMAT_XVYU2101010:
4596                 return PLANE_CTL_FORMAT_Y410;
4597         case DRM_FORMAT_XVYU12_16161616:
4598                 return PLANE_CTL_FORMAT_Y412;
4599         case DRM_FORMAT_XVYU16161616:
4600                 return PLANE_CTL_FORMAT_Y416;
4601         default:
4602                 MISSING_CASE(pixel_format);
4603         }
4604
4605         return 0;
4606 }
4607
4608 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
4609 {
4610         if (!plane_state->hw.fb->format->has_alpha)
4611                 return PLANE_CTL_ALPHA_DISABLE;
4612
4613         switch (plane_state->hw.pixel_blend_mode) {
4614         case DRM_MODE_BLEND_PIXEL_NONE:
4615                 return PLANE_CTL_ALPHA_DISABLE;
4616         case DRM_MODE_BLEND_PREMULTI:
4617                 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4618         case DRM_MODE_BLEND_COVERAGE:
4619                 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4620         default:
4621                 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4622                 return PLANE_CTL_ALPHA_DISABLE;
4623         }
4624 }
4625
4626 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4627 {
4628         if (!plane_state->hw.fb->format->has_alpha)
4629                 return PLANE_COLOR_ALPHA_DISABLE;
4630
4631         switch (plane_state->hw.pixel_blend_mode) {
4632         case DRM_MODE_BLEND_PIXEL_NONE:
4633                 return PLANE_COLOR_ALPHA_DISABLE;
4634         case DRM_MODE_BLEND_PREMULTI:
4635                 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4636         case DRM_MODE_BLEND_COVERAGE:
4637                 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4638         default:
4639                 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4640                 return PLANE_COLOR_ALPHA_DISABLE;
4641         }
4642 }
4643
4644 static u32 skl_plane_ctl_tiling(u64 fb_modifier)
4645 {
4646         switch (fb_modifier) {
4647         case DRM_FORMAT_MOD_LINEAR:
4648                 break;
4649         case I915_FORMAT_MOD_X_TILED:
4650                 return PLANE_CTL_TILED_X;
4651         case I915_FORMAT_MOD_Y_TILED:
4652                 return PLANE_CTL_TILED_Y;
4653         case I915_FORMAT_MOD_Y_TILED_CCS:
4654                 return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4655         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
4656                 return PLANE_CTL_TILED_Y |
4657                        PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
4658                        PLANE_CTL_CLEAR_COLOR_DISABLE;
4659         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
4660                 return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
4661         case I915_FORMAT_MOD_Yf_TILED:
4662                 return PLANE_CTL_TILED_YF;
4663         case I915_FORMAT_MOD_Yf_TILED_CCS:
4664                 return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
4665         default:
4666                 MISSING_CASE(fb_modifier);
4667         }
4668
4669         return 0;
4670 }
4671
4672 static u32 skl_plane_ctl_rotate(unsigned int rotate)
4673 {
4674         switch (rotate) {
4675         case DRM_MODE_ROTATE_0:
4676                 break;
4677         /*
4678          * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
4679          * while i915 HW rotation is clockwise, thats why this swapping.
4680          */
4681         case DRM_MODE_ROTATE_90:
4682                 return PLANE_CTL_ROTATE_270;
4683         case DRM_MODE_ROTATE_180:
4684                 return PLANE_CTL_ROTATE_180;
4685         case DRM_MODE_ROTATE_270:
4686                 return PLANE_CTL_ROTATE_90;
4687         default:
4688                 MISSING_CASE(rotate);
4689         }
4690
4691         return 0;
4692 }
4693
4694 static u32 cnl_plane_ctl_flip(unsigned int reflect)
4695 {
4696         switch (reflect) {
4697         case 0:
4698                 break;
4699         case DRM_MODE_REFLECT_X:
4700                 return PLANE_CTL_FLIP_HORIZONTAL;
4701         case DRM_MODE_REFLECT_Y:
4702         default:
4703                 MISSING_CASE(reflect);
4704         }
4705
4706         return 0;
4707 }
4708
4709 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4710 {
4711         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4712         u32 plane_ctl = 0;
4713
4714         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4715                 return plane_ctl;
4716
4717         if (crtc_state->gamma_enable)
4718                 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4719
4720         if (crtc_state->csc_enable)
4721                 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
4722
4723         return plane_ctl;
4724 }
4725
/*
 * Compute the per-plane PLANE_CTL value from the plane's atomic state:
 * format, tiling, rotation/reflection, (pre-GLK) alpha and YCbCr
 * handling, plus colorkey enables.  Pipe-level bits are added
 * separately by skl_plane_ctl_crtc().
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/*
	 * On GLK and gen10+ the alpha/gamma/CSC bits are programmed via
	 * PLANE_COLOR_CTL (see glk_plane_color_ctl()) instead.
	 */
	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* Reflection bits are only programmed on gen10+. */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	/* Destination colorkey takes priority over source colorkey. */
	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
4764
4765 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4766 {
4767         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4768         u32 plane_color_ctl = 0;
4769
4770         if (INTEL_GEN(dev_priv) >= 11)
4771                 return plane_color_ctl;
4772
4773         if (crtc_state->gamma_enable)
4774                 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4775
4776         if (crtc_state->csc_enable)
4777                 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4778
4779         return plane_color_ctl;
4780 }
4781
/*
 * Compute the per-plane PLANE_COLOR_CTL value (GLK+): alpha blend
 * mode, and for YUV framebuffers the YUV->RGB CSC mode and range
 * correction.  Pipe-level bits are added by glk_plane_color_ctl_crtc().
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	u32 plane_color_ctl = 0;

	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
		/* Pick BT.709 vs BT.601 coefficients for the CSC. */
		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
		else
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		/* YUV on an ICL HDR plane enables the input CSC instead. */
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
	}

	return plane_color_ctl;
}
4808
/*
 * Re-synchronize the driver's software state with the hardware
 * (intel_modeset_setup_hw_state() + VGA re-disable), then commit the
 * previously duplicated atomic @state, if any.  Returns 0 on success
 * or a negative error code from the commit.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	/* No saved state to restore - the readout above is all we do. */
	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	/* NOTE(review): -EDEADLK is not expected here - presumably @ctx
	 * already holds all the locks the commit needs; confirm at callers. */
	WARN_ON(ret == -EDEADLK);
	return ret;
}
4847
4848 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4849 {
4850         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4851                 intel_has_gpu_reset(&dev_priv->gt));
4852 }
4853
/*
 * Prepare the display for a GPU reset.
 *
 * When the reset will clobber the display (or the
 * force_reset_modeset_test modparam asks for it): take all modeset
 * locks, duplicate the current atomic state into
 * dev_priv->modeset_restore_state and disable all CRTCs.
 * intel_finish_reset() later restores the saved state and drops the
 * locks.
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* Keep retrying until the locks come without -EDEADLK. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		return;
	}

	/* Handed over to intel_finish_reset() via fetch_and_zero(). */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
4913
/*
 * Counterpart of intel_prepare_reset(): restore the display state
 * saved before the GPU reset and drop the modeset locks taken there.
 * No-op if intel_prepare_reset() decided the reset doesn't touch the
 * display (I915_RESET_MODESET not set).
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	/* Claim the saved state; prepare may have bailed before setting it. */
	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);

		/* Re-arm hotplug interrupts before restoring the state. */
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	/* Pairs with set_bit() in intel_prepare_reset(). */
	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
4966
4967 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
4968 {
4969         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4970         enum pipe pipe = crtc->pipe;
4971         u32 tmp;
4972
4973         tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
4974
4975         /*
4976          * Display WA #1153: icl
4977          * enable hardware to bypass the alpha math
4978          * and rounding for per-pixel values 00 and 0xff
4979          */
4980         tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
4981         /*
4982          * Display WA # 1605353570: icl
4983          * Set the pixel rounding bit to 1 for allowing
4984          * passthrough of Frame buffer pixels unmodified
4985          * across pipe
4986          */
4987         tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
4988         intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
4989 }
4990
/*
 * Enable Transcoder Port Sync on a slave CRTC's transcoder, pointing
 * it at its master transcoder.  No-op for non-slave CRTCs
 * (master_transcoder == INVALID_TRANSCODER).
 */
static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 trans_ddi_func_ctl2_val;
	u8 master_select;

	/*
	 * Configure the master select and enable Transcoder Port Sync for
	 * Slave CRTCs transcoder.
	 */
	if (crtc_state->master_transcoder == INVALID_TRANSCODER)
		return;

	/* Master encoding: 0 selects the EDP transcoder, else index + 1. */
	if (crtc_state->master_transcoder == TRANSCODER_EDP)
		master_select = 0;
	else
		master_select = crtc_state->master_transcoder + 1;

	/* Set the master select bits for Transcoder Port Sync */
	trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) &
				   PORT_SYNC_MODE_MASTER_SELECT_MASK) <<
		PORT_SYNC_MODE_MASTER_SELECT_SHIFT;
	/* Enable Transcoder Port Sync */
	trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;

	intel_de_write(dev_priv,
		       TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
		       trans_ddi_func_ctl2_val);
}
5021
/*
 * Switch the FDI TX/RX of @crtc's pipe out of the training patterns
 * into normal operation, with enhanced framing enabled on both ends.
 * Waits one idle pattern time, and on IVB also enables FDI error
 * correction.  Called after link training has completed.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT uses a dedicated pattern field on the RX side */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_write(dev_priv, reg,
			       intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}
5062
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Run the two-pattern FDI link training sequence: enable CPU FDI TX
 * and PCH FDI RX with training pattern 1, poll FDI_RX_IIR for bit
 * lock, switch both ends to pattern 2, then poll for symbol lock.
 * Failures are logged but not propagated to the caller.
 */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll up to 5 times for bit lock (writing the bit back clears it) */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Poll up to 5 times for symbol lock */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");

}
5158
/*
 * FDI TX voltage-swing / pre-emphasis settings tried in order by the
 * SNB and IVB link training loops below.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
5165
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Two-pattern FDI link training with voltage-swing/pre-emphasis
 * stepping: for each pattern, try each snb_b_fdi_train_param[] level,
 * polling FDI_RX_IIR up to 5 times per level for bit lock (pattern 1)
 * or symbol lock (pattern 2).  Failures are logged but not propagated.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT uses a dedicated pattern field on the RX side */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Step through each vswing/pre-emphasis level until bit lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Step through each vswing/pre-emphasis level until symbol lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
5302
5303 /* Manual link training for Ivy Bridge A0 parts */
5304 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
5305                                       const struct intel_crtc_state *crtc_state)
5306 {
5307         struct drm_device *dev = crtc->base.dev;
5308         struct drm_i915_private *dev_priv = to_i915(dev);
5309         enum pipe pipe = crtc->pipe;
5310         i915_reg_t reg;
5311         u32 temp, i, j;
5312
5313         /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
5314            for train result */
5315         reg = FDI_RX_IMR(pipe);
5316         temp = intel_de_read(dev_priv, reg);
5317         temp &= ~FDI_RX_SYMBOL_LOCK;
5318         temp &= ~FDI_RX_BIT_LOCK;
5319         intel_de_write(dev_priv, reg, temp);
5320
5321         intel_de_posting_read(dev_priv, reg);
5322         udelay(150);
5323
5324         drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
5325                     intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
5326
5327         /* Try each vswing and preemphasis setting twice before moving on */
5328         for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
5329                 /* disable first in case we need to retry */
5330                 reg = FDI_TX_CTL(pipe);
5331                 temp = intel_de_read(dev_priv, reg);
5332                 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
5333                 temp &= ~FDI_TX_ENABLE;
5334                 intel_de_write(dev_priv, reg, temp);
5335
5336                 reg = FDI_RX_CTL(pipe);
5337                 temp = intel_de_read(dev_priv, reg);
5338                 temp &= ~FDI_LINK_TRAIN_AUTO;
5339                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
5340                 temp &= ~FDI_RX_ENABLE;
5341                 intel_de_write(dev_priv, reg, temp);
5342
5343                 /* enable CPU FDI TX and PCH FDI RX */
5344                 reg = FDI_TX_CTL(pipe);
5345                 temp = intel_de_read(dev_priv, reg);
5346                 temp &= ~FDI_DP_PORT_WIDTH_MASK;
5347                 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
5348                 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
5349                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
5350                 temp |= snb_b_fdi_train_param[j/2];
5351                 temp |= FDI_COMPOSITE_SYNC;
5352                 intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
5353
5354                 intel_de_write(dev_priv, FDI_RX_MISC(pipe),
5355                                FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
5356
5357                 reg = FDI_RX_CTL(pipe);
5358                 temp = intel_de_read(dev_priv, reg);
5359                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
5360                 temp |= FDI_COMPOSITE_SYNC;
5361                 intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
5362
5363                 intel_de_posting_read(dev_priv, reg);
5364                 udelay(1); /* should be 0.5us */
5365
5366                 for (i = 0; i < 4; i++) {
5367                         reg = FDI_RX_IIR(pipe);
5368                         temp = intel_de_read(dev_priv, reg);
5369                         drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
5370
5371                         if (temp & FDI_RX_BIT_LOCK ||
5372                             (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
5373                                 intel_de_write(dev_priv, reg,
5374                                                temp | FDI_RX_BIT_LOCK);
5375                                 drm_dbg_kms(&dev_priv->drm,
5376                                             "FDI train 1 done, level %i.\n",
5377                                             i);
5378                                 break;
5379                         }
5380                         udelay(1); /* should be 0.5us */
5381                 }
5382                 if (i == 4) {
5383                         drm_dbg_kms(&dev_priv->drm,
5384                                     "FDI train 1 fail on vswing %d\n", j / 2);
5385                         continue;
5386                 }
5387
5388                 /* Train 2 */
5389                 reg = FDI_TX_CTL(pipe);
5390                 temp = intel_de_read(dev_priv, reg);
5391                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
5392                 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
5393                 intel_de_write(dev_priv, reg, temp);
5394
5395                 reg = FDI_RX_CTL(pipe);
5396                 temp = intel_de_read(dev_priv, reg);
5397                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
5398                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
5399                 intel_de_write(dev_priv, reg, temp);
5400
5401                 intel_de_posting_read(dev_priv, reg);
5402                 udelay(2); /* should be 1.5us */
5403
5404                 for (i = 0; i < 4; i++) {
5405                         reg = FDI_RX_IIR(pipe);
5406                         temp = intel_de_read(dev_priv, reg);
5407                         drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
5408
5409                         if (temp & FDI_RX_SYMBOL_LOCK ||
5410                             (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
5411                                 intel_de_write(dev_priv, reg,
5412                                                temp | FDI_RX_SYMBOL_LOCK);
5413                                 drm_dbg_kms(&dev_priv->drm,
5414                                             "FDI train 2 done, level %i.\n",
5415                                             i);
5416                                 goto train_done;
5417                         }
5418                         udelay(2); /* should be 1.5us */
5419                 }
5420                 if (i == 4)
5421                         drm_dbg_kms(&dev_priv->drm,
5422                                     "FDI train 2 fail on vswing %d\n", j / 2);
5423         }
5424
5425 train_done:
5426         drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
5427 }
5428
/*
 * Enable the FDI PLLs for @crtc_state's pipe.
 *
 * Sequence: PCH FDI RX PLL first (with lane count and BPC programmed),
 * then switch the RX clock source from Rawclk to PCDclk, and finally
 * the CPU FDI TX PLL.  The udelay()s provide PLL warmup time (plus DMI
 * latency for the RX side).
 */
static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	/* clear lane count and the BPC field (bits 18:16) before setting them */
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* FDI RX BPC must match PIPECONF's BPC; shift PIPECONF's field up to bits 18:16 */
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(100);
	}
}
5465
/*
 * Disable the FDI PLLs for @intel_crtc's pipe, in the reverse order of
 * ilk_fdi_pll_enable(): switch the RX clock back to Rawclk, then turn
 * off the CPU TX PLL and finally the PCH RX PLL.
 */
static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}
5495
/*
 * Disable CPU FDI TX and PCH FDI RX for @crtc's pipe and put both link
 * ends back into training pattern 1, ready for the next link train.
 * The FDI PLLs are not touched here (see ilk_fdi_pll_disable()).
 */
static void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	/* keep the RX BPC field (bits 18:16) consistent with PIPECONF while disabling */
	temp &= ~(0x7 << 16);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT uses different training-pattern bits than IBX/ILK */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}
5547
5548 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
5549 {
5550         struct drm_crtc *crtc;
5551         bool cleanup_done;
5552
5553         drm_for_each_crtc(crtc, &dev_priv->drm) {
5554                 struct drm_crtc_commit *commit;
5555                 spin_lock(&crtc->commit_lock);
5556                 commit = list_first_entry_or_null(&crtc->commit_list,
5557                                                   struct drm_crtc_commit, commit_entry);
5558                 cleanup_done = commit ?
5559                         try_wait_for_completion(&commit->cleanup_done) : true;
5560                 spin_unlock(&crtc->commit_lock);
5561
5562                 if (cleanup_done)
5563                         continue;
5564
5565                 drm_crtc_wait_one_vblank(crtc);
5566
5567                 return true;
5568         }
5569
5570         return false;
5571 }
5572
/*
 * Gate the iCLKIP pixel clock and disable the SSC modulator through
 * the sideband interface.  This is the inverse of the tail end of
 * lpt_program_iclkip(): gate PIXCLK first, then set the disable bit
 * in SBI_SSCCTL6 under the sideband lock.
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
5587
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* Start from a known state: clock gated, modulator disabled. */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		/*
		 * Split the total divisor into an integer part (divsel,
		 * stored with a -2 offset) and a fractional phase
		 * increment; retry with auxdiv (an extra /2 stage) if
		 * divsel overflows its 7-bit field.
		 */
		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	drm_dbg_kms(&dev_priv->drm,
		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		    clock, auxdiv, divsel, phasedir, phaseinc);

	/* All sideband accesses below are serialized by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
5663
5664 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
5665 {
5666         u32 divsel, phaseinc, auxdiv;
5667         u32 iclk_virtual_root_freq = 172800 * 1000;
5668         u32 iclk_pi_range = 64;
5669         u32 desired_divisor;
5670         u32 temp;
5671
5672         if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
5673                 return 0;
5674
5675         mutex_lock(&dev_priv->sb_lock);
5676
5677         temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5678         if (temp & SBI_SSCCTL_DISABLE) {
5679                 mutex_unlock(&dev_priv->sb_lock);
5680                 return 0;
5681         }
5682
5683         temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
5684         divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
5685                 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
5686         phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
5687                 SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
5688
5689         temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
5690         auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
5691                 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
5692
5693         mutex_unlock(&dev_priv->sb_lock);
5694
5695         desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
5696
5697         return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
5698                                  desired_divisor << auxdiv);
5699 }
5700
/*
 * Copy the CPU transcoder's horizontal and vertical timing registers
 * into the given PCH transcoder, so both ends of the FDI link run
 * identical timings.
 */
static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
					   enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* horizontal timings */
	intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
		       intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
		       intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
		       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));

	/* vertical timings */
	intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
		       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
		       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
		       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
}
5724
/*
 * Set or clear the FDI B/C lane bifurcation select bit in
 * SOUTH_CHICKEN1.  No-op if the bit already matches @enable; it must
 * only be flipped while both FDI B and FDI C receivers are disabled,
 * hence the WARN_ONs.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	/* must not be toggled while either FDI RX is running */
	WARN_ON(intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}
5745
5746 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5747 {
5748         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5749         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5750
5751         switch (crtc->pipe) {
5752         case PIPE_A:
5753                 break;
5754         case PIPE_B:
5755                 if (crtc_state->fdi_lanes > 2)
5756                         cpt_set_fdi_bc_bifurcation(dev_priv, false);
5757                 else
5758                         cpt_set_fdi_bc_bifurcation(dev_priv, true);
5759
5760                 break;
5761         case PIPE_C:
5762                 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5763
5764                 break;
5765         default:
5766                 BUG();
5767         }
5768 }
5769
5770 /*
5771  * Finds the encoder associated with the given CRTC. This can only be
5772  * used when we know that the CRTC isn't feeding multiple encoders!
5773  */
5774 static struct intel_encoder *
5775 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5776                            const struct intel_crtc_state *crtc_state)
5777 {
5778         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5779         const struct drm_connector_state *connector_state;
5780         const struct drm_connector *connector;
5781         struct intel_encoder *encoder = NULL;
5782         int num_encoders = 0;
5783         int i;
5784
5785         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5786                 if (connector_state->crtc != &crtc->base)
5787                         continue;
5788
5789                 encoder = to_intel_encoder(connector_state->best_encoder);
5790                 num_encoders++;
5791         }
5792
5793         drm_WARN(encoder->base.dev, num_encoders != 1,
5794                  "%d encoders for pipe %c\n",
5795                  num_encoders, pipe_name(crtc->pipe));
5796
5797         return encoder;
5798 }
5799
5800 /*
5801  * Enable PCH resources required for PCH ports:
5802  *   - PCH PLLs
5803  *   - FDI training & RX/TX
5804  *   - update transcoder timings
5805  *   - DP transcoding bits
5806  *   - transcoder
5807  */
static void ilk_pch_enable(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	/* IVB pipes share FDI B/C lanes; settle the bifurcation first. */
	if (IS_IVYBRIDGE(dev_priv))
		ivb_update_fdi_bc_bifurcation(crtc_state);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc, crtc_state);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		/* route DPLL A or B to this pipe's PCH transcoder */
		temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (crtc_state->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(crtc_state);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ilk_pch_transcoder_set_timings(crtc_state, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) &&
	    intel_crtc_has_dp_encoder(crtc_state)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->hw.adjusted_mode;
		/* extract the BPC field from PIPECONF (bits 7:5) */
		u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		enum port port;

		temp = intel_de_read(dev_priv, reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* only PCH ports B..D can feed a CPT PCH DP transcoder */
		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
		WARN_ON(port < PORT_B || port > PORT_D);
		temp |= TRANS_DP_PORT_SEL(port);

		intel_de_write(dev_priv, reg, temp);
	}

	ilk_enable_pch_transcoder(crtc_state);
}
5891
/*
 * LPT variant of ilk_pch_enable(): the PCH transcoder is clocked by
 * iCLKIP (no FDI training, no PCH DPLL selection needed) and the
 * timing/assert helpers are hardcoded to PIPE_A.
 */
void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, PIPE_A);

	lpt_program_iclkip(crtc_state);

	/* Set transcoder timing. */
	ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
5907
/*
 * Post-modeset sanity check for CPT: verify the pipe's scanline
 * counter (PIPEDSL) is advancing.  A counter that stays stuck through
 * both wait_for() periods indicates the mode set failed.
 */
static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = intel_de_read(dev_priv, dslreg);
	udelay(500);
	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
		/* retry once before declaring the pipe stuck */
		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
			drm_err(&dev_priv->drm,
				"mode set failed: pipe %c stuck\n",
				pipe_name(pipe));
	}
}
5923
5924 /*
5925  * The hardware phase 0.0 refers to the center of the pixel.
5926  * We want to start from the top/left edge which is phase
5927  * -0.5. That matches how the hardware calculates the scaling
5928  * factors (from top-left of the first pixel to bottom-right
5929  * of the last pixel, as opposed to the pixel centers).
5930  *
5931  * For 4:2:0 subsampled chroma planes we obviously have to
5932  * adjust that so that the chroma sample position lands in
5933  * the right spot.
5934  *
5935  * Note that for packed YCbCr 4:2:2 formats there is no way to
5936  * control chroma siting. The hardware simply replicates the
5937  * chroma samples for both of the luma samples, and thus we don't
5938  * actually get the expected MPEG2 chroma siting convention :(
5939  * The same behaviour is observed on pre-SKL platforms as well.
5940  *
5941  * Theory behind the formula (note that we ignore sub-pixel
5942  * source coordinates):
5943  * s = source sample position
5944  * d = destination sample position
5945  *
5946  * Downscaling 4:1:
5947  * -0.5
5948  * | 0.0
5949  * | |     1.5 (initial phase)
5950  * | |     |
5951  * v v     v
5952  * | s | s | s | s |
5953  * |       d       |
5954  *
5955  * Upscaling 1:4:
5956  * -0.5
5957  * | -0.375 (initial phase)
5958  * | |     0.0
5959  * | |     |
5960  * v v     v
5961  * |       s       |
5962  * | d | d | d | d |
5963  */
5964 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5965 {
5966         int phase = -0x8000;
5967         u16 trip = 0;
5968
5969         if (chroma_cosited)
5970                 phase += (sub - 1) * 0x8000 / sub;
5971
5972         phase += scale / (2 * sub);
5973
5974         /*
5975          * Hardware initial phase limited to [-0.5:1.5].
5976          * Since the max hardware scale factor is 3.0, we
5977          * should never actually excdeed 1.0 here.
5978          */
5979         WARN_ON(phase < -0x8000 || phase > 0x18000);
5980
5981         if (phase < 0)
5982                 phase = 0x10000 + phase;
5983         else
5984                 trip = PS_PHASE_TRIP;
5985
5986         return ((phase >> 2) & PS_PHASE_MASK) | trip;
5987 }
5988
/*
 * Pipe/plane scaler source and destination size limits, in pixels.
 * ICL raised the maximum width to 5120 (see the gen >= 11 checks in
 * skl_update_scaler()); planar (semiplanar YUV) sources additionally
 * need at least 16x16.
 */
#define SKL_MIN_SRC_W 8
#define SKL_MAX_SRC_W 4096
#define SKL_MIN_SRC_H 8
#define SKL_MAX_SRC_H 4096
#define SKL_MIN_DST_W 8
#define SKL_MAX_DST_W 4096
#define SKL_MIN_DST_H 8
#define SKL_MAX_DST_H 4096
#define ICL_MAX_SRC_W 5120
#define ICL_MAX_SRC_H 4096
#define ICL_MAX_DST_W 5120
#define ICL_MAX_DST_H 4096
#define SKL_MIN_YUV_420_SRC_W 16
#define SKL_MIN_YUV_420_SRC_H 16
6003
/*
 * skl_update_scaler - stage a scaler request or release for one
 * scaler user (the pipe itself or a plane) in @crtc_state.
 *
 * Validates the requested src/dst sizes against hardware limits, then
 * sets or clears the user's bit in scaler_state->scaler_users.  The
 * actual scaler register programming (and freeing) happens later in
 * plane/panel-fitter programming; scaler_id is intentionally not
 * reset here when freeing.
 *
 * Returns 0 on success, -EINVAL if the requested scaling cannot be
 * supported.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  const struct drm_format_info *format,
		  u64 modifier, bool need_scaler)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	if (src_w != dst_w || src_h != dst_h)
		need_scaler = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		drm_dbg_kms(&dev_priv->drm,
			    "Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler binded to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaler) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			drm_dbg_kms(&dev_priv->drm,
				    "scaler_user index %u.%u: "
				    "Staged freeing scaler id %d scaler_users = 0x%x\n",
				    intel_crtc->pipe, scaler_user, *scaler_id,
				    scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* semiplanar YUV needs a larger minimum source size */
	if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Planar YUV: src dimensions not met\n");
		return -EINVAL;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
	    (INTEL_GEN(dev_priv) >= 11 &&
	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
	    (INTEL_GEN(dev_priv) < 11 &&
	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
		drm_dbg_kms(&dev_priv->drm,
			    "scaler_user index %u.%u: src %ux%u dst %ux%u "
			    "size is out of scaler range\n",
			    intel_crtc->pipe, scaler_user, src_w, src_h,
			    dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
		    "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		    intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		    scaler_state->scaler_users);

	return 0;
}
6098
6099 /**
6100  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
6101  *
 * @state: crtc state containing the scaler state to update
6103  *
6104  * Return
6105  *     0 - scaler_usage updated successfully
6106  *    error - requested scaling cannot be supported or other error condition
6107  */
6108 int skl_update_scaler_crtc(struct intel_crtc_state *state)
6109 {
6110         const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
6111         bool need_scaler = false;
6112
6113         if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
6114             state->pch_pfit.enabled)
6115                 need_scaler = true;
6116
6117         return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
6118                                  &state->scaler_state.scaler_id,
6119                                  state->pipe_src_w, state->pipe_src_h,
6120                                  adjusted_mode->crtc_hdisplay,
6121                                  adjusted_mode->crtc_vdisplay, NULL, 0,
6122                                  need_scaler);
6123 }
6124
6125 /**
6126  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
6127  * @crtc_state: crtc's scaler state
6128  * @plane_state: atomic plane state to update
6129  *
6130  * Return
6131  *     0 - scaler_usage updated successfully
6132  *    error - requested scaling cannot be supported or other error condition
6133  */
6134 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
6135                                    struct intel_plane_state *plane_state)
6136 {
6137         struct intel_plane *intel_plane =
6138                 to_intel_plane(plane_state->uapi.plane);
6139         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
6140         struct drm_framebuffer *fb = plane_state->hw.fb;
6141         int ret;
6142         bool force_detach = !fb || !plane_state->uapi.visible;
6143         bool need_scaler = false;
6144
6145         /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
6146         if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
6147             fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
6148                 need_scaler = true;
6149
6150         ret = skl_update_scaler(crtc_state, force_detach,
6151                                 drm_plane_index(&intel_plane->base),
6152                                 &plane_state->scaler_id,
6153                                 drm_rect_width(&plane_state->uapi.src) >> 16,
6154                                 drm_rect_height(&plane_state->uapi.src) >> 16,
6155                                 drm_rect_width(&plane_state->uapi.dst),
6156                                 drm_rect_height(&plane_state->uapi.dst),
6157                                 fb ? fb->format : NULL,
6158                                 fb ? fb->modifier : 0,
6159                                 need_scaler);
6160
6161         if (ret || plane_state->scaler_id < 0)
6162                 return ret;
6163
6164         /* check colorkey */
6165         if (plane_state->ckey.flags) {
6166                 drm_dbg_kms(&dev_priv->drm,
6167                             "[PLANE:%d:%s] scaling with color key not allowed",
6168                             intel_plane->base.base.id,
6169                             intel_plane->base.name);
6170                 return -EINVAL;
6171         }
6172
6173         /* Check src format */
6174         switch (fb->format->format) {
6175         case DRM_FORMAT_RGB565:
6176         case DRM_FORMAT_XBGR8888:
6177         case DRM_FORMAT_XRGB8888:
6178         case DRM_FORMAT_ABGR8888:
6179         case DRM_FORMAT_ARGB8888:
6180         case DRM_FORMAT_XRGB2101010:
6181         case DRM_FORMAT_XBGR2101010:
6182         case DRM_FORMAT_ARGB2101010:
6183         case DRM_FORMAT_ABGR2101010:
6184         case DRM_FORMAT_YUYV:
6185         case DRM_FORMAT_YVYU:
6186         case DRM_FORMAT_UYVY:
6187         case DRM_FORMAT_VYUY:
6188         case DRM_FORMAT_NV12:
6189         case DRM_FORMAT_P010:
6190         case DRM_FORMAT_P012:
6191         case DRM_FORMAT_P016:
6192         case DRM_FORMAT_Y210:
6193         case DRM_FORMAT_Y212:
6194         case DRM_FORMAT_Y216:
6195         case DRM_FORMAT_XVYU2101010:
6196         case DRM_FORMAT_XVYU12_16161616:
6197         case DRM_FORMAT_XVYU16161616:
6198                 break;
6199         case DRM_FORMAT_XBGR16161616F:
6200         case DRM_FORMAT_ABGR16161616F:
6201         case DRM_FORMAT_XRGB16161616F:
6202         case DRM_FORMAT_ARGB16161616F:
6203                 if (INTEL_GEN(dev_priv) >= 11)
6204                         break;
6205                 /* fall through */
6206         default:
6207                 drm_dbg_kms(&dev_priv->drm,
6208                             "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
6209                             intel_plane->base.base.id, intel_plane->base.name,
6210                             fb->base.id, fb->format->format);
6211                 return -EINVAL;
6212         }
6213
6214         return 0;
6215 }
6216
6217 void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
6218 {
6219         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
6220         int i;
6221
6222         for (i = 0; i < crtc->num_scalers; i++)
6223                 skl_detach_scaler(crtc, i);
6224 }
6225
/*
 * Program a SKL+ pipe scaler to implement the panel fitter configuration
 * from @crtc_state. No-op when pch_pfit is not enabled.
 */
static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;

	if (crtc_state->pch_pfit.enabled) {
		u16 uv_rgb_hphase, uv_rgb_vphase;
		int pfit_w, pfit_h, hscale, vscale;
		int id;

		/* A scaler must have been assigned during atomic check. */
		if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
			return;

		/* pch_pfit.size packs width in the high, height in the low 16 bits. */
		pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
		pfit_h = crtc_state->pch_pfit.size & 0xFFFF;

		/* .16 fixed point scale factors from pipe source to pfit window. */
		hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
		vscale = (crtc_state->pipe_src_h << 16) / pfit_h;

		uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
		uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

		id = scaler_state->scaler_id;
		intel_de_write(dev_priv, SKL_PS_CTRL(pipe, id),
			       PS_SCALER_EN | PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		/*
		 * NOTE(review): the phase registers use the _fw (unlocked)
		 * write variant while the rest use intel_de_write — confirm
		 * this mix is intentional.
		 */
		intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
				  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
		intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
				  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
		intel_de_write(dev_priv, SKL_PS_WIN_POS(pipe, id),
			       crtc_state->pch_pfit.pos);
		intel_de_write(dev_priv, SKL_PS_WIN_SZ(pipe, id),
			       crtc_state->pch_pfit.size);
	}
}
6264
6265 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
6266 {
6267         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6268         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6269         enum pipe pipe = crtc->pipe;
6270
6271         if (crtc_state->pch_pfit.enabled) {
6272                 /* Force use of hard-coded filter coefficients
6273                  * as some pre-programmed values are broken,
6274                  * e.g. x201.
6275                  */
6276                 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
6277                         intel_de_write(dev_priv, PF_CTL(pipe),
6278                                        PF_ENABLE | PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
6279                 else
6280                         intel_de_write(dev_priv, PF_CTL(pipe),
6281                                        PF_ENABLE | PF_FILTER_MED_3x3);
6282                 intel_de_write(dev_priv, PF_WIN_POS(pipe),
6283                                crtc_state->pch_pfit.pos);
6284                 intel_de_write(dev_priv, PF_WIN_SZ(pipe),
6285                                crtc_state->pch_pfit.size);
6286         }
6287 }
6288
/*
 * Enable IPS if @crtc_state asks for it. Broadwell goes through the pcode
 * mailbox; otherwise IPS_CTL is written directly and polled for the enable
 * bit. Must be called after a plane has been enabled and a vblank has
 * passed (see comment below).
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
						IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS enable\n");
	}
}
6325
/*
 * Disable IPS if @crtc_state has it enabled. Broadwell goes through the
 * pcode mailbox and waits for the disable to latch; otherwise IPS_CTL is
 * cleared directly. Ends with a vblank wait so planes may be disabled
 * safely afterwards.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS disable\n");
	} else {
		intel_de_write(dev_priv, IPS_CTL, 0);
		intel_de_posting_read(dev_priv, IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
6353
6354 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
6355 {
6356         if (intel_crtc->overlay)
6357                 (void) intel_overlay_switch_off(intel_crtc->overlay);
6358
6359         /* Let userspace switch the overlay on again. In most cases userspace
6360          * has to recompute where to put it anyway.
6361          */
6362 }
6363
6364 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
6365                                        const struct intel_crtc_state *new_crtc_state)
6366 {
6367         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6368         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6369
6370         if (!old_crtc_state->ips_enabled)
6371                 return false;
6372
6373         if (needs_modeset(new_crtc_state))
6374                 return true;
6375
6376         /*
6377          * Workaround : Do not read or write the pipe palette/gamma data while
6378          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6379          *
6380          * Disable IPS before we program the LUT.
6381          */
6382         if (IS_HASWELL(dev_priv) &&
6383             (new_crtc_state->uapi.color_mgmt_changed ||
6384              new_crtc_state->update_pipe) &&
6385             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6386                 return true;
6387
6388         return !new_crtc_state->ips_enabled;
6389 }
6390
6391 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
6392                                        const struct intel_crtc_state *new_crtc_state)
6393 {
6394         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6395         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6396
6397         if (!new_crtc_state->ips_enabled)
6398                 return false;
6399
6400         if (needs_modeset(new_crtc_state))
6401                 return true;
6402
6403         /*
6404          * Workaround : Do not read or write the pipe palette/gamma data while
6405          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6406          *
6407          * Re-enable IPS after the LUT has been programmed.
6408          */
6409         if (IS_HASWELL(dev_priv) &&
6410             (new_crtc_state->uapi.color_mgmt_changed ||
6411              new_crtc_state->update_pipe) &&
6412             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6413                 return true;
6414
6415         /*
6416          * We can't read out IPS on broadwell, assume the worst and
6417          * forcibly enable IPS on the first fastset.
6418          */
6419         if (new_crtc_state->update_pipe &&
6420             old_crtc_state->hw.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
6421                 return true;
6422
6423         return !old_crtc_state->ips_enabled;
6424 }
6425
6426 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
6427 {
6428         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6429
6430         if (!crtc_state->nv12_planes)
6431                 return false;
6432
6433         /* WA Display #0827: Gen9:all */
6434         if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
6435                 return true;
6436
6437         return false;
6438 }
6439
6440 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
6441 {
6442         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6443
6444         /* Wa_2006604312:icl */
6445         if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
6446                 return true;
6447
6448         return false;
6449 }
6450
6451 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
6452                             const struct intel_crtc_state *new_crtc_state)
6453 {
6454         return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) &&
6455                 new_crtc_state->active_planes;
6456 }
6457
6458 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
6459                              const struct intel_crtc_state *new_crtc_state)
6460 {
6461         return old_crtc_state->active_planes &&
6462                 (!new_crtc_state->active_planes || needs_modeset(new_crtc_state));
6463 }
6464
/*
 * Post-commit bookkeeping for @crtc: flush frontbuffer tracking, update
 * watermarks, re-enable IPS/FBC and tear down workarounds that are no
 * longer needed with the new plane configuration.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(crtc);

	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
		hsw_enable_ips(new_crtc_state);

	intel_fbc_post_update(state, crtc);

	/* Display WA #0827: disable only once the last NV12 plane is gone. */
	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	/* Wa_2006604312:icl: disable once no scaler is in use anymore. */
	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);
}
6493
/*
 * Pre-commit preparation for @crtc: disable IPS/FBC where required, arm
 * workarounds the new plane configuration needs, flush self-refresh
 * before planes are disabled, and program pre-vblank ("intermediate")
 * watermarks. The ordering of the steps below is significant.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
		hsw_disable_ips(old_crtc_state);

	/* FBC pre-update may require a vblank before planes change. */
	if (intel_fbc_pre_update(state, crtc))
		intel_wait_for_vblank(dev_priv, pipe);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks.  For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (dev_priv->display.initial_watermarks)
			dev_priv->display.initial_watermarks(state, crtc);
		else if (new_crtc_state->update_wm_pre)
			intel_update_watermarks(crtc);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
6580
6581 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
6582                                       struct intel_crtc *crtc)
6583 {
6584         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6585         const struct intel_crtc_state *new_crtc_state =
6586                 intel_atomic_get_new_crtc_state(state, crtc);
6587         unsigned int update_mask = new_crtc_state->update_planes;
6588         const struct intel_plane_state *old_plane_state;
6589         struct intel_plane *plane;
6590         unsigned fb_bits = 0;
6591         int i;
6592
6593         intel_crtc_dpms_overlay_disable(crtc);
6594
6595         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
6596                 if (crtc->pipe != plane->pipe ||
6597                     !(update_mask & BIT(plane->id)))
6598                         continue;
6599
6600                 intel_disable_plane(plane, new_crtc_state);
6601
6602                 if (old_plane_state->uapi.visible)
6603                         fb_bits |= plane->frontbuffer_bit;
6604         }
6605
6606         intel_frontbuffer_flip(dev_priv, fb_bits);
6607 }
6608
6609 /*
6610  * intel_connector_primary_encoder - get the primary encoder for a connector
6611  * @connector: connector for which to return the encoder
6612  *
6613  * Returns the primary encoder for a connector. There is a 1:1 mapping from
6614  * all connectors to their encoder, except for DP-MST connectors which have
6615  * both a virtual and a primary encoder. These DP-MST primary encoders can be
6616  * pointed to by as many DP-MST connectors as there are pipes.
6617  */
6618 static struct intel_encoder *
6619 intel_connector_primary_encoder(struct intel_connector *connector)
6620 {
6621         struct intel_encoder *encoder;
6622
6623         if (connector->mst_port)
6624                 return &dp_to_dig_port(connector->mst_port)->base;
6625
6626         encoder = intel_attached_encoder(connector);
6627         WARN_ON(!encoder);
6628
6629         return encoder;
6630 }
6631
6632 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
6633 {
6634         struct drm_connector_state *new_conn_state;
6635         struct drm_connector *connector;
6636         int i;
6637
6638         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
6639                                         i) {
6640                 struct intel_connector *intel_connector;
6641                 struct intel_encoder *encoder;
6642                 struct intel_crtc *crtc;
6643
6644                 if (!intel_connector_needs_modeset(state, connector))
6645                         continue;
6646
6647                 intel_connector = to_intel_connector(connector);
6648                 encoder = intel_connector_primary_encoder(intel_connector);
6649                 if (!encoder->update_prepare)
6650                         continue;
6651
6652                 crtc = new_conn_state->crtc ?
6653                         to_intel_crtc(new_conn_state->crtc) : NULL;
6654                 encoder->update_prepare(state, encoder, crtc);
6655         }
6656 }
6657
6658 static void intel_encoders_update_complete(struct intel_atomic_state *state)
6659 {
6660         struct drm_connector_state *new_conn_state;
6661         struct drm_connector *connector;
6662         int i;
6663
6664         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
6665                                         i) {
6666                 struct intel_connector *intel_connector;
6667                 struct intel_encoder *encoder;
6668                 struct intel_crtc *crtc;
6669
6670                 if (!intel_connector_needs_modeset(state, connector))
6671                         continue;
6672
6673                 intel_connector = to_intel_connector(connector);
6674                 encoder = intel_connector_primary_encoder(intel_connector);
6675                 if (!encoder->update_complete)
6676                         continue;
6677
6678                 crtc = new_conn_state->crtc ?
6679                         to_intel_crtc(new_conn_state->crtc) : NULL;
6680                 encoder->update_complete(state, encoder, crtc);
6681         }
6682 }
6683
6684 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
6685                                           struct intel_crtc *crtc)
6686 {
6687         const struct intel_crtc_state *crtc_state =
6688                 intel_atomic_get_new_crtc_state(state, crtc);
6689         const struct drm_connector_state *conn_state;
6690         struct drm_connector *conn;
6691         int i;
6692
6693         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6694                 struct intel_encoder *encoder =
6695                         to_intel_encoder(conn_state->best_encoder);
6696
6697                 if (conn_state->crtc != &crtc->base)
6698                         continue;
6699
6700                 if (encoder->pre_pll_enable)
6701                         encoder->pre_pll_enable(encoder, crtc_state, conn_state);
6702         }
6703 }
6704
6705 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
6706                                       struct intel_crtc *crtc)
6707 {
6708         const struct intel_crtc_state *crtc_state =
6709                 intel_atomic_get_new_crtc_state(state, crtc);
6710         const struct drm_connector_state *conn_state;
6711         struct drm_connector *conn;
6712         int i;
6713
6714         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6715                 struct intel_encoder *encoder =
6716                         to_intel_encoder(conn_state->best_encoder);
6717
6718                 if (conn_state->crtc != &crtc->base)
6719                         continue;
6720
6721                 if (encoder->pre_enable)
6722                         encoder->pre_enable(encoder, crtc_state, conn_state);
6723         }
6724 }
6725
6726 static void intel_encoders_enable(struct intel_atomic_state *state,
6727                                   struct intel_crtc *crtc)
6728 {
6729         const struct intel_crtc_state *crtc_state =
6730                 intel_atomic_get_new_crtc_state(state, crtc);
6731         const struct drm_connector_state *conn_state;
6732         struct drm_connector *conn;
6733         int i;
6734
6735         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6736                 struct intel_encoder *encoder =
6737                         to_intel_encoder(conn_state->best_encoder);
6738
6739                 if (conn_state->crtc != &crtc->base)
6740                         continue;
6741
6742                 if (encoder->enable)
6743                         encoder->enable(encoder, crtc_state, conn_state);
6744                 intel_opregion_notify_encoder(encoder, true);
6745         }
6746 }
6747
6748 static void intel_encoders_disable(struct intel_atomic_state *state,
6749                                    struct intel_crtc *crtc)
6750 {
6751         const struct intel_crtc_state *old_crtc_state =
6752                 intel_atomic_get_old_crtc_state(state, crtc);
6753         const struct drm_connector_state *old_conn_state;
6754         struct drm_connector *conn;
6755         int i;
6756
6757         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6758                 struct intel_encoder *encoder =
6759                         to_intel_encoder(old_conn_state->best_encoder);
6760
6761                 if (old_conn_state->crtc != &crtc->base)
6762                         continue;
6763
6764                 intel_opregion_notify_encoder(encoder, false);
6765                 if (encoder->disable)
6766                         encoder->disable(encoder, old_crtc_state, old_conn_state);
6767         }
6768 }
6769
6770 static void intel_encoders_post_disable(struct intel_atomic_state *state,
6771                                         struct intel_crtc *crtc)
6772 {
6773         const struct intel_crtc_state *old_crtc_state =
6774                 intel_atomic_get_old_crtc_state(state, crtc);
6775         const struct drm_connector_state *old_conn_state;
6776         struct drm_connector *conn;
6777         int i;
6778
6779         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6780                 struct intel_encoder *encoder =
6781                         to_intel_encoder(old_conn_state->best_encoder);
6782
6783                 if (old_conn_state->crtc != &crtc->base)
6784                         continue;
6785
6786                 if (encoder->post_disable)
6787                         encoder->post_disable(encoder, old_crtc_state, old_conn_state);
6788         }
6789 }
6790
6791 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
6792                                             struct intel_crtc *crtc)
6793 {
6794         const struct intel_crtc_state *old_crtc_state =
6795                 intel_atomic_get_old_crtc_state(state, crtc);
6796         const struct drm_connector_state *old_conn_state;
6797         struct drm_connector *conn;
6798         int i;
6799
6800         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6801                 struct intel_encoder *encoder =
6802                         to_intel_encoder(old_conn_state->best_encoder);
6803
6804                 if (old_conn_state->crtc != &crtc->base)
6805                         continue;
6806
6807                 if (encoder->post_pll_disable)
6808                         encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
6809         }
6810 }
6811
6812 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
6813                                        struct intel_crtc *crtc)
6814 {
6815         const struct intel_crtc_state *crtc_state =
6816                 intel_atomic_get_new_crtc_state(state, crtc);
6817         const struct drm_connector_state *conn_state;
6818         struct drm_connector *conn;
6819         int i;
6820
6821         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6822                 struct intel_encoder *encoder =
6823                         to_intel_encoder(conn_state->best_encoder);
6824
6825                 if (conn_state->crtc != &crtc->base)
6826                         continue;
6827
6828                 if (encoder->update_pipe)
6829                         encoder->update_pipe(encoder, crtc_state, conn_state);
6830         }
6831 }
6832
6833 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6834 {
6835         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6836         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6837
6838         plane->disable_plane(plane, crtc_state);
6839 }
6840
/*
 * Enable a pipe on ILK-style (PCH split) display hardware: program pipe
 * timings/config, run the encoder enable hooks in order, enable the pipe
 * and, when a PCH encoder is present, the FDI/PCH side as well.
 * Underrun reporting is suppressed for the duration (see comment below).
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
                            struct intel_crtc *crtc)
{
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* Enabling an already-active crtc is a driver bug. */
        if (WARN_ON(crtc->active))
                return;

        /*
         * Sometimes spurious CPU pipe underruns happen during FDI
         * training, at least with VGA+HDMI cloning. Suppress them.
         *
         * On ILK we get an occasional spurious CPU pipe underruns
         * between eDP port A enable and vdd enable. Also PCH port
         * enable seems to result in the occasional CPU pipe underrun.
         *
         * Spurious PCH underruns also occur during PCH enabling.
         */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        if (new_crtc_state->has_pch_encoder)
                intel_prepare_shared_dpll(new_crtc_state);

        if (intel_crtc_has_dp_encoder(new_crtc_state))
                intel_dp_set_m_n(new_crtc_state, M1_N1);

        /* Program pipe timings and source size before touching encoders. */
        intel_set_pipe_timings(new_crtc_state);
        intel_set_pipe_src_size(new_crtc_state);

        if (new_crtc_state->has_pch_encoder)
                intel_cpu_transcoder_set_m_n(new_crtc_state,
                                             &new_crtc_state->fdi_m_n, NULL);

        ilk_set_pipeconf(new_crtc_state);

        crtc->active = true;

        intel_encoders_pre_enable(state, crtc);

        if (new_crtc_state->has_pch_encoder) {
                /* Note: FDI PLL enabling _must_ be done before we enable the
                 * cpu pipes, hence this is separate from all the other fdi/pch
                 * enabling. */
                ilk_fdi_pll_enable(new_crtc_state);
        } else {
                /* No PCH encoder: FDI must be fully off. */
                assert_fdi_tx_disabled(dev_priv, pipe);
                assert_fdi_rx_disabled(dev_priv, pipe);
        }

        ilk_pfit_enable(new_crtc_state);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(new_crtc_state);
        intel_color_commit(new_crtc_state);
        /* update DSPCNTR to configure gamma for pipe bottom color */
        intel_disable_primary_plane(new_crtc_state);

        if (dev_priv->display.initial_watermarks)
                dev_priv->display.initial_watermarks(state, crtc);
        intel_enable_pipe(new_crtc_state);

        if (new_crtc_state->has_pch_encoder)
                ilk_pch_enable(state, new_crtc_state);

        intel_crtc_vblank_on(new_crtc_state);

        intel_encoders_enable(state, crtc);

        if (HAS_PCH_CPT(dev_priv))
                cpt_verify_modeset(dev_priv, pipe);

        /*
         * Must wait for vblank to avoid spurious PCH FIFO underruns.
         * And a second vblank wait is needed at least on ILK with
         * some interlaced HDMI modes. Let's do the double wait always
         * in case there are more corner cases we don't know about.
         */
        if (new_crtc_state->has_pch_encoder) {
                intel_wait_for_vblank(dev_priv, pipe);
                intel_wait_for_vblank(dev_priv, pipe);
        }
        /* Re-arm underrun reporting now that the enable sequence is done. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6932
6933 /* IPS only exists on ULT machines and is tied to pipe A. */
6934 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
6935 {
6936         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
6937 }
6938
6939 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6940                                             enum pipe pipe, bool apply)
6941 {
6942         u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
6943         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6944
6945         if (apply)
6946                 val |= mask;
6947         else
6948                 val &= ~mask;
6949
6950         intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
6951 }
6952
6953 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
6954 {
6955         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6956         enum pipe pipe = crtc->pipe;
6957         u32 val;
6958
6959         val = MBUS_DBOX_A_CREDIT(2);
6960
6961         if (INTEL_GEN(dev_priv) >= 12) {
6962                 val |= MBUS_DBOX_BW_CREDIT(2);
6963                 val |= MBUS_DBOX_B_CREDIT(12);
6964         } else {
6965                 val |= MBUS_DBOX_BW_CREDIT(1);
6966                 val |= MBUS_DBOX_B_CREDIT(8);
6967         }
6968
6969         intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
6970 }
6971
6972 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
6973 {
6974         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6975         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6976
6977         intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
6978                        HSW_LINETIME(crtc_state->linetime) |
6979                        HSW_IPS_LINETIME(crtc_state->ips_linetime));
6980 }
6981
6982 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
6983 {
6984         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6985         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6986         i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
6987         u32 val;
6988
6989         val = intel_de_read(dev_priv, reg);
6990         val &= ~HSW_FRAME_START_DELAY_MASK;
6991         val |= HSW_FRAME_START_DELAY(0);
6992         intel_de_write(dev_priv, reg, val);
6993 }
6994
/*
 * Enable a pipe on HSW+ hardware: enable the shared DPLL, program the
 * transcoder/pipe state, run the encoder hooks in their required order,
 * and apply the GLK/CNL scaler clock-gating and HSW workaround-pipe
 * vblank workarounds at the end.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
                            struct intel_crtc *crtc)
{
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
        enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
        bool psl_clkgate_wa;

        /* Enabling an already-active crtc is a driver bug. */
        if (WARN_ON(crtc->active))
                return;

        intel_encoders_pre_pll_enable(state, crtc);

        if (new_crtc_state->shared_dpll)
                intel_enable_shared_dpll(new_crtc_state);

        intel_encoders_pre_enable(state, crtc);

        /* DSI transcoders program their own timings via the encoder hooks. */
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_set_pipe_timings(new_crtc_state);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_enable_trans_port_sync(new_crtc_state);

        intel_set_pipe_src_size(new_crtc_state);

        if (cpu_transcoder != TRANSCODER_EDP &&
            !transcoder_is_dsi(cpu_transcoder))
                intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
                               new_crtc_state->pixel_multiplier - 1);

        if (new_crtc_state->has_pch_encoder)
                intel_cpu_transcoder_set_m_n(new_crtc_state,
                                             &new_crtc_state->fdi_m_n, NULL);

        if (!transcoder_is_dsi(cpu_transcoder)) {
                hsw_set_frame_start_delay(new_crtc_state);
                hsw_set_pipeconf(new_crtc_state);
        }

        if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
                bdw_set_pipemisc(new_crtc_state);

        crtc->active = true;

        /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
        psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
                new_crtc_state->pch_pfit.enabled;
        if (psl_clkgate_wa)
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

        if (INTEL_GEN(dev_priv) >= 9)
                skl_pfit_enable(new_crtc_state);
        else
                ilk_pfit_enable(new_crtc_state);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(new_crtc_state);
        intel_color_commit(new_crtc_state);
        /* update DSPCNTR to configure gamma/csc for pipe bottom color */
        if (INTEL_GEN(dev_priv) < 9)
                intel_disable_primary_plane(new_crtc_state);

        hsw_set_linetime_wm(new_crtc_state);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_set_pipe_chicken(crtc);

        if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_enable_transcoder_func(new_crtc_state);

        if (dev_priv->display.initial_watermarks)
                dev_priv->display.initial_watermarks(state, crtc);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_pipe_mbus_enable(crtc);

        intel_encoders_enable(state, crtc);

        /* WA #1180 cleanup: wait a vblank before re-enabling clock gating. */
        if (psl_clkgate_wa) {
                intel_wait_for_vblank(dev_priv, pipe);
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
        }

        /* If we change the relative order between pipe/planes enabling, we need
         * to change the workaround. */
        hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
        if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
                intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
                intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
        }
}
7092
/* Disable the PCH panel fitter for @old_crtc_state's pipe, if it was in use. */
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* To avoid upsetting the power well on haswell only disable the pfit if
         * it's in use. The hw state code will make sure we get this right. */
        if (old_crtc_state->pch_pfit.enabled) {
                intel_de_write(dev_priv, PF_CTL(pipe), 0);
                intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
                intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
        }
}
7107
/*
 * Disable a pipe on ILK-style (PCH split) hardware: tear down in the
 * reverse order of ilk_crtc_enable() — encoders, pipe, pfit, FDI, then
 * the PCH transcoder and FDI PLL. Underrun reporting is suppressed for
 * the duration (see comment below).
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
                             struct intel_crtc *crtc)
{
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /*
         * Sometimes spurious CPU pipe underruns happen when the
         * pipe is already disabled, but FDI RX/TX is still enabled.
         * Happens at least with VGA+HDMI cloning. Suppress them.
         */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        intel_encoders_disable(state, crtc);

        intel_crtc_vblank_off(old_crtc_state);

        intel_disable_pipe(old_crtc_state);

        ilk_pfit_disable(old_crtc_state);

        if (old_crtc_state->has_pch_encoder)
                ilk_fdi_disable(crtc);

        intel_encoders_post_disable(state, crtc);

        if (old_crtc_state->has_pch_encoder) {
                ilk_disable_pch_transcoder(dev_priv, pipe);

                if (HAS_PCH_CPT(dev_priv)) {
                        i915_reg_t reg;
                        u32 temp;

                        /* disable TRANS_DP_CTL */
                        reg = TRANS_DP_CTL(pipe);
                        temp = intel_de_read(dev_priv, reg);
                        temp &= ~(TRANS_DP_OUTPUT_ENABLE |
                                  TRANS_DP_PORT_SEL_MASK);
                        temp |= TRANS_DP_PORT_SEL_NONE;
                        intel_de_write(dev_priv, reg, temp);

                        /* disable DPLL_SEL */
                        temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
                        temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
                        intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
                }

                ilk_fdi_pll_disable(crtc);
        }

        /* Re-arm underrun reporting now that the disable sequence is done. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
7164
/*
 * Disable path for HSW+: only the encoder disable hooks are invoked here;
 * the remaining teardown happens via those hooks (see FIXME below).
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
                             struct intel_crtc *crtc)
{
        /*
         * FIXME collapse everything to one hook.
         * Need care with mst->ddi interactions.
         */
        intel_encoders_disable(state, crtc);
        intel_encoders_post_disable(state, crtc);
}
7175
/*
 * Program and enable the GMCH panel fitter for @crtc_state, if the
 * precomputed state requests it. Must be called while the pipe is still
 * disabled (asserted below).
 */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        /* Nothing to do if the pfit is not in use for this mode. */
        if (!crtc_state->gmch_pfit.control)
                return;

        /*
         * The panel fitter should only be adjusted whilst the pipe is disabled,
         * according to register description and PRM.
         */
        WARN_ON(intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
        assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

        intel_de_write(dev_priv, PFIT_PGM_RATIOS,
                       crtc_state->gmch_pfit.pgm_ratios);
        intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);

        /* Border color in case we don't scale up to the full screen. Black by
         * default, change to something else for debugging. */
        intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
7199
7200 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
7201 {
7202         if (phy == PHY_NONE)
7203                 return false;
7204
7205         if (IS_ELKHARTLAKE(dev_priv))
7206                 return phy <= PHY_C;
7207
7208         if (INTEL_GEN(dev_priv) >= 11)
7209                 return phy <= PHY_B;
7210
7211         return false;
7212 }
7213
7214 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
7215 {
7216         if (INTEL_GEN(dev_priv) >= 12)
7217                 return phy >= PHY_D && phy <= PHY_I;
7218
7219         if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
7220                 return phy >= PHY_C && phy <= PHY_F;
7221
7222         return false;
7223 }
7224
/*
 * Map a DDI port to its PHY. On EHL port D is served by PHY A;
 * everywhere else the port and phy enumerations correspond 1:1.
 */
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
{
        if (IS_ELKHARTLAKE(i915) && port == PORT_D)
                return PHY_A;

        return (enum phy)port;
}
7232
7233 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
7234 {
7235         if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
7236                 return PORT_TC_NONE;
7237
7238         if (INTEL_GEN(dev_priv) >= 12)
7239                 return port - PORT_D;
7240
7241         return port - PORT_C;
7242 }
7243
/*
 * Map a DDI port to its lane power domain. Unknown ports are flagged
 * via MISSING_CASE() and fall back to POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
        switch (port) {
        case PORT_A:
                return POWER_DOMAIN_PORT_DDI_A_LANES;
        case PORT_B:
                return POWER_DOMAIN_PORT_DDI_B_LANES;
        case PORT_C:
                return POWER_DOMAIN_PORT_DDI_C_LANES;
        case PORT_D:
                return POWER_DOMAIN_PORT_DDI_D_LANES;
        case PORT_E:
                return POWER_DOMAIN_PORT_DDI_E_LANES;
        case PORT_F:
                return POWER_DOMAIN_PORT_DDI_F_LANES;
        case PORT_G:
                return POWER_DOMAIN_PORT_DDI_G_LANES;
        default:
                MISSING_CASE(port);
                return POWER_DOMAIN_PORT_OTHER;
        }
}
7266
/*
 * Map a digital port's AUX channel to its power domain. Type-C ports in
 * TBT-alt mode use the dedicated *_TBT AUX domains; all other ports use
 * the regular AUX domains. Unknown channels are flagged via MISSING_CASE().
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

        /* TBT-alt mode Type-C ports get the TBT flavour of the AUX domain. */
        if (intel_phy_is_tc(dev_priv, phy) &&
            dig_port->tc_mode == TC_PORT_TBT_ALT) {
                switch (dig_port->aux_ch) {
                case AUX_CH_C:
                        return POWER_DOMAIN_AUX_C_TBT;
                case AUX_CH_D:
                        return POWER_DOMAIN_AUX_D_TBT;
                case AUX_CH_E:
                        return POWER_DOMAIN_AUX_E_TBT;
                case AUX_CH_F:
                        return POWER_DOMAIN_AUX_F_TBT;
                case AUX_CH_G:
                        return POWER_DOMAIN_AUX_G_TBT;
                default:
                        MISSING_CASE(dig_port->aux_ch);
                        return POWER_DOMAIN_AUX_C_TBT;
                }
        }

        switch (dig_port->aux_ch) {
        case AUX_CH_A:
                return POWER_DOMAIN_AUX_A;
        case AUX_CH_B:
                return POWER_DOMAIN_AUX_B;
        case AUX_CH_C:
                return POWER_DOMAIN_AUX_C;
        case AUX_CH_D:
                return POWER_DOMAIN_AUX_D;
        case AUX_CH_E:
                return POWER_DOMAIN_AUX_E;
        case AUX_CH_F:
                return POWER_DOMAIN_AUX_F;
        case AUX_CH_G:
                return POWER_DOMAIN_AUX_G;
        default:
                MISSING_CASE(dig_port->aux_ch);
                return POWER_DOMAIN_AUX_A;
        }
}
7312
/*
 * Compute the bitmask of power domains required by @crtc_state: the
 * pipe and transcoder domains, the panel fitter (if enabled or forced),
 * each bound encoder's domain, audio (on DDI platforms) and the display
 * core domain when a shared DPLL is used. Returns 0 for an inactive crtc.
 */
static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct drm_encoder *encoder;
        enum pipe pipe = crtc->pipe;
        u64 mask;
        enum transcoder transcoder = crtc_state->cpu_transcoder;

        if (!crtc_state->hw.active)
                return 0;

        mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
        mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
        if (crtc_state->pch_pfit.enabled ||
            crtc_state->pch_pfit.force_thru)
                mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

        /* Pull in the power domain of every encoder feeding this crtc. */
        drm_for_each_encoder_mask(encoder, &dev_priv->drm,
                                  crtc_state->uapi.encoder_mask) {
                struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

                mask |= BIT_ULL(intel_encoder->power_domain);
        }

        if (HAS_DDI(dev_priv) && crtc_state->has_audio)
                mask |= BIT_ULL(POWER_DOMAIN_AUDIO);

        if (crtc_state->shared_dpll)
                mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);

        return mask;
}
7346
/*
 * Grab references on all power domains newly required by @crtc_state
 * and record them in crtc->enabled_power_domains. Returns the mask of
 * domains that are no longer needed; the caller is responsible for
 * releasing those (see modeset_put_power_domains()).
 */
static u64
modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum intel_display_power_domain domain;
        u64 domains, new_domains, old_domains;

        old_domains = crtc->enabled_power_domains;
        crtc->enabled_power_domains = new_domains =
                get_crtc_power_domains(crtc_state);

        /* Acquire only the domains we didn't already hold. */
        domains = new_domains & ~old_domains;

        for_each_power_domain(domain, domains)
                intel_display_power_get(dev_priv, domain);

        /* Domains held before but no longer needed: caller must put them. */
        return old_domains & ~new_domains;
}
7366
/* Release a reference on every power domain set in @domains. */
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
                                      u64 domains)
{
        enum intel_display_power_domain domain;

        for_each_power_domain(domain, domains)
                intel_display_power_put_unchecked(dev_priv, domain);
}
7375
/*
 * Enable a pipe on VLV/CHV: program pipe state, enable the DPLL between
 * the pre_pll_enable and pre_enable encoder hooks, then enable the pipe
 * and fire the enable hooks.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
                                   struct intel_crtc *crtc)
{
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* Enabling an already-active crtc is a driver bug. */
        if (WARN_ON(crtc->active))
                return;

        if (intel_crtc_has_dp_encoder(new_crtc_state))
                intel_dp_set_m_n(new_crtc_state, M1_N1);

        intel_set_pipe_timings(new_crtc_state);
        intel_set_pipe_src_size(new_crtc_state);

        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
                intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
                intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
        }

        i9xx_set_pipeconf(new_crtc_state);

        crtc->active = true;

        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

        intel_encoders_pre_pll_enable(state, crtc);

        /* CHV and VLV have different PLL programming sequences. */
        if (IS_CHERRYVIEW(dev_priv)) {
                chv_prepare_pll(crtc, new_crtc_state);
                chv_enable_pll(crtc, new_crtc_state);
        } else {
                vlv_prepare_pll(crtc, new_crtc_state);
                vlv_enable_pll(crtc, new_crtc_state);
        }

        intel_encoders_pre_enable(state, crtc);

        i9xx_pfit_enable(new_crtc_state);

        intel_color_load_luts(new_crtc_state);
        intel_color_commit(new_crtc_state);
        /* update DSPCNTR to configure gamma for pipe bottom color */
        intel_disable_primary_plane(new_crtc_state);

        /* NOTE(review): called unconditionally here, unlike the NULL-checked
         * calls elsewhere — presumably always set on vlv/chv; confirm. */
        dev_priv->display.initial_watermarks(state, crtc);
        intel_enable_pipe(new_crtc_state);

        intel_crtc_vblank_on(new_crtc_state);

        intel_encoders_enable(state, crtc);
}
7430
7431 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
7432 {
7433         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7434         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7435
7436         intel_de_write(dev_priv, FP0(crtc->pipe),
7437                        crtc_state->dpll_hw_state.fp0);
7438         intel_de_write(dev_priv, FP1(crtc->pipe),
7439                        crtc_state->dpll_hw_state.fp1);
7440 }
7441
/*
 * Enable a pipe on gen2-4 (i9xx) hardware: program PLL dividers and pipe
 * state, enable the PLL, panel fitter and color state, then enable the
 * pipe and fire the encoder enable hooks.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
                             struct intel_crtc *crtc)
{
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* Enabling an already-active crtc is a driver bug. */
        if (WARN_ON(crtc->active))
                return;

        i9xx_set_pll_dividers(new_crtc_state);

        if (intel_crtc_has_dp_encoder(new_crtc_state))
                intel_dp_set_m_n(new_crtc_state, M1_N1);

        intel_set_pipe_timings(new_crtc_state);
        intel_set_pipe_src_size(new_crtc_state);

        i9xx_set_pipeconf(new_crtc_state);

        crtc->active = true;

        /* Gen2 has no FIFO underrun reporting to arm. */
        if (!IS_GEN(dev_priv, 2))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

        intel_encoders_pre_enable(state, crtc);

        i9xx_enable_pll(crtc, new_crtc_state);

        i9xx_pfit_enable(new_crtc_state);

        intel_color_load_luts(new_crtc_state);
        intel_color_commit(new_crtc_state);
        /* update DSPCNTR to configure gamma for pipe bottom color */
        intel_disable_primary_plane(new_crtc_state);

        if (dev_priv->display.initial_watermarks)
                dev_priv->display.initial_watermarks(state, crtc);
        else
                intel_update_watermarks(crtc);
        intel_enable_pipe(new_crtc_state);

        intel_crtc_vblank_on(new_crtc_state);

        intel_encoders_enable(state, crtc);
}
7489
/*
 * Disable the GMCH panel fitter if the old state had it enabled. Must be
 * called with the pipe already disabled (asserted below).
 */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        if (!old_crtc_state->gmch_pfit.control)
                return;

        assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);

        drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
                    intel_de_read(dev_priv, PFIT_CONTROL));
        intel_de_write(dev_priv, PFIT_CONTROL, 0);
}
7504
/*
 * Disable a pipe on gen2-4 (i9xx) hardware, tearing down in the reverse
 * order of i9xx_crtc_enable(): encoders, pipe, pfit, then the PLL
 * (skipped for DSI, which manages its own PLL).
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
                              struct intel_crtc *crtc)
{
        struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /*
         * On gen2 planes are double buffered but the pipe isn't, so we must
         * wait for planes to fully turn off before disabling the pipe.
         */
        if (IS_GEN(dev_priv, 2))
                intel_wait_for_vblank(dev_priv, pipe);

        intel_encoders_disable(state, crtc);

        intel_crtc_vblank_off(old_crtc_state);

        intel_disable_pipe(old_crtc_state);

        i9xx_pfit_disable(old_crtc_state);

        intel_encoders_post_disable(state, crtc);

        /* DSI manages its own PLL; only touch the DPLL for other outputs. */
        if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
                if (IS_CHERRYVIEW(dev_priv))
                        chv_disable_pll(dev_priv, pipe);
                else if (IS_VALLEYVIEW(dev_priv))
                        vlv_disable_pll(dev_priv, pipe);
                else
                        i9xx_disable_pll(old_crtc_state);
        }

        intel_encoders_post_pll_disable(state, crtc);

        /* Gen2 has no FIFO underrun reporting to disarm. */
        if (!IS_GEN(dev_priv, 2))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

        if (!dev_priv->display.initial_watermarks)
                intel_update_watermarks(crtc);

        /* clock the pipe down to 640x480@60 to potentially save power */
        if (IS_I830(dev_priv))
                i830_enable_pipe(dev_priv, pipe);
}
7551
/*
 * Forcibly disable an active crtc outside of a full atomic commit:
 * disable its visible planes, call the platform crtc_disable hook via a
 * throwaway atomic state, then scrub all software state (crtc state,
 * encoder links, watermarks, DPLL, power domains, cdclk and bandwidth
 * bookkeeping) to match the now-off hardware.
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
                                        struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_encoder *encoder;
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_bw_state *bw_state =
                to_intel_bw_state(dev_priv->bw_obj.state);
        struct intel_cdclk_state *cdclk_state =
                to_intel_cdclk_state(dev_priv->cdclk.obj.state);
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(crtc->base.state);
        enum intel_display_power_domain domain;
        struct intel_plane *plane;
        struct drm_atomic_state *state;
        struct intel_crtc_state *temp_crtc_state;
        enum pipe pipe = crtc->pipe;
        u64 domains;
        int ret;

        /* Nothing to do if the crtc is already off. */
        if (!crtc_state->hw.active)
                return;

        /* Turn off every plane that is currently visible on this crtc. */
        for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
                const struct intel_plane_state *plane_state =
                        to_intel_plane_state(plane->base.state);

                if (plane_state->uapi.visible)
                        intel_plane_disable_noatomic(crtc, plane);
        }

        /* Build a minimal atomic state just to drive the disable hook. */
        state = drm_atomic_state_alloc(&dev_priv->drm);
        if (!state) {
                drm_dbg_kms(&dev_priv->drm,
                            "failed to disable [CRTC:%d:%s], out of memory",
                            crtc->base.base.id, crtc->base.name);
                return;
        }

        state->acquire_ctx = ctx;

        /* Everything's already locked, -EDEADLK can't happen. */
        temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
        ret = drm_atomic_add_affected_connectors(state, &crtc->base);

        WARN_ON(IS_ERR(temp_crtc_state) || ret);

        dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);

        drm_atomic_state_put(state);

        drm_dbg_kms(&dev_priv->drm,
                    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
                    crtc->base.base.id, crtc->base.name);

        /* From here on, bring the software state in line with the disable. */
        crtc->active = false;
        crtc->base.enabled = false;

        WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
        crtc_state->uapi.active = false;
        crtc_state->uapi.connector_mask = 0;
        crtc_state->uapi.encoder_mask = 0;
        intel_crtc_free_hw_state(crtc_state);
        memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

        /* Detach every encoder that was pointing at this crtc. */
        for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
                encoder->base.crtc = NULL;

        intel_fbc_disable(crtc);
        intel_update_watermarks(crtc);
        intel_disable_shared_dpll(crtc_state);

        /* Drop every power domain reference this crtc was holding. */
        domains = crtc->enabled_power_domains;
        for_each_power_domain(domain, domains)
                intel_display_power_put_unchecked(dev_priv, domain);
        crtc->enabled_power_domains = 0;

        /* Clear per-pipe cdclk and bandwidth accounting. */
        dev_priv->active_pipes &= ~BIT(pipe);
        cdclk_state->min_cdclk[pipe] = 0;
        cdclk_state->min_voltage_level[pipe] = 0;
        cdclk_state->active_pipes &= ~BIT(pipe);

        bw_state->data_rate[pipe] = 0;
        bw_state->num_active_planes[pipe] = 0;
}
7636
7637 /*
7638  * turn all crtc's off, but do not adjust state
7639  * This has to be paired with a call to intel_modeset_setup_hw_state.
7640  */
7641 int intel_display_suspend(struct drm_device *dev)
7642 {
7643         struct drm_i915_private *dev_priv = to_i915(dev);
7644         struct drm_atomic_state *state;
7645         int ret;
7646
7647         state = drm_atomic_helper_suspend(dev);
7648         ret = PTR_ERR_OR_ZERO(state);
7649         if (ret)
7650                 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
7651                         ret);
7652         else
7653                 dev_priv->modeset_restore_state = state;
7654         return ret;
7655 }
7656
/*
 * drm_encoder .destroy hook: tear down the DRM core encoder state and
 * free the containing intel_encoder allocation.
 */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
        drm_encoder_cleanup(encoder);

        /* The drm_encoder is embedded in intel_encoder; free the container. */
        kfree(to_intel_encoder(encoder));
}
7664
/*
 * Cross check the actual hw state with our own modeset state tracking (and its
 * internal consistency).
 */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
                                         struct drm_connector_state *conn_state)
{
        struct intel_connector *connector = to_intel_connector(conn_state->connector);
        struct drm_i915_private *i915 = to_i915(connector->base.dev);

        drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
                    connector->base.base.id, connector->base.name);

        if (connector->get_hw_state(connector)) {
                struct intel_encoder *encoder = intel_attached_encoder(connector);

                /* HW says the connector is on; SW must have a crtc for it. */
                I915_STATE_WARN(!crtc_state,
                         "connector enabled without attached crtc\n");

                if (!crtc_state)
                        return;

                I915_STATE_WARN(!crtc_state->hw.active,
                                "connector is active, but attached crtc isn't\n");

                /* MST connectors don't have a 1:1 encoder mapping; skip. */
                if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
                        return;

                I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
                        "atomic encoder doesn't match attached encoder\n");

                I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
                        "attached encoder crtc differs from connector crtc\n");
        } else {
                /* HW says off: any attached crtc must be inactive. */
                I915_STATE_WARN(crtc_state && crtc_state->hw.active,
                                "attached crtc is active, but connector isn't\n");
                I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
                        "best encoder set without crtc!\n");
        }
}
7703
7704 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7705 {
7706         if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
7707                 return crtc_state->fdi_lanes;
7708
7709         return 0;
7710 }
7711
/*
 * Validate the FDI lane count requested for @pipe, including the lane
 * sharing constraints between pipes B and C on IVB three-pipe parts.
 *
 * Returns 0 when the config is acceptable, -EINVAL when it is not, or
 * -EDEADLK (via intel_atomic_get_crtc_state) when the caller must back
 * off and retry the locking.
 */
static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *state = pipe_config->uapi.state;
        struct intel_crtc *other_crtc;
        struct intel_crtc_state *other_crtc_state;

        drm_dbg_kms(&dev_priv->drm,
                    "checking fdi config on pipe %c, lanes %i\n",
                    pipe_name(pipe), pipe_config->fdi_lanes);
        /* FDI is at most x4 on any supported platform. */
        if (pipe_config->fdi_lanes > 4) {
                drm_dbg_kms(&dev_priv->drm,
                            "invalid fdi lane config on pipe %c: %i lanes\n",
                            pipe_name(pipe), pipe_config->fdi_lanes);
                return -EINVAL;
        }

        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                if (pipe_config->fdi_lanes > 2) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "only 2 lanes on haswell, required: %i lanes\n",
                                    pipe_config->fdi_lanes);
                        return -EINVAL;
                } else {
                        return 0;
                }
        }

        /* Two-pipe parts have no FDI lane sharing to worry about. */
        if (INTEL_NUM_PIPES(dev_priv) == 2)
                return 0;

        /* Ivybridge 3 pipe is really complicated */
        switch (pipe) {
        case PIPE_A:
                return 0;
        case PIPE_B:
                /* Pipe B needing >2 lanes steals them from pipe C. */
                if (pipe_config->fdi_lanes <= 2)
                        return 0;

                other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
                other_crtc_state =
                        intel_atomic_get_crtc_state(state, other_crtc);
                if (IS_ERR(other_crtc_state))
                        return PTR_ERR(other_crtc_state);

                if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "invalid shared fdi lane config on pipe %c: %i lanes\n",
                                    pipe_name(pipe), pipe_config->fdi_lanes);
                        return -EINVAL;
                }
                return 0;
        case PIPE_C:
                if (pipe_config->fdi_lanes > 2) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "only 2 lanes on pipe %c: required %i lanes\n",
                                    pipe_name(pipe), pipe_config->fdi_lanes);
                        return -EINVAL;
                }

                /* Pipe C can only run if pipe B leaves it >= 2 lanes. */
                other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
                other_crtc_state =
                        intel_atomic_get_crtc_state(state, other_crtc);
                if (IS_ERR(other_crtc_state))
                        return PTR_ERR(other_crtc_state);

                if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "fdi link B uses too many lanes to enable link C\n");
                        return -EINVAL;
                }
                return 0;
        default:
                BUG();
        }
}
7789
#define RETRY 1
/*
 * Compute the FDI lane count and M/N values for @pipe_config, lowering
 * the pipe bpp (down to 6 bpc) as needed to fit the FDI bandwidth.
 *
 * Returns 0 on success, RETRY (1) if the caller must recompute the mode
 * with the reduced bpp, or a negative error code.
 */
static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
                                  struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *i915 = to_i915(dev);
        const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
        int lane, link_bw, fdi_dotclock, ret;
        bool needs_recompute = false;

retry:
        /* FDI is a binary signal running at ~2.7GHz, encoding
         * each output octet as 10 bits. The actual frequency
         * is stored as a divider into a 100MHz clock, and the
         * mode pixel clock is stored in units of 1KHz.
         * Hence the bw of each lane in terms of the mode signal
         * is:
         */
        link_bw = intel_fdi_link_freq(i915, pipe_config);

        fdi_dotclock = adjusted_mode->crtc_clock;

        lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
                                      pipe_config->pipe_bpp);

        pipe_config->fdi_lanes = lane;

        intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
                               link_bw, &pipe_config->fdi_m_n, false, false);

        ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
        /* -EDEADLK is a locking back-off; propagate it untouched. */
        if (ret == -EDEADLK)
                return ret;

        /*
         * Invalid lane config: retry with a smaller pipe bpp to shrink
         * the required FDI bandwidth, stopping at 6 bpc.
         */
        if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
                pipe_config->pipe_bpp -= 2*3;
                drm_dbg_kms(&i915->drm,
                            "fdi link bw constraint, reducing pipe bpp to %i\n",
                            pipe_config->pipe_bpp);
                needs_recompute = true;
                pipe_config->bw_constrained = true;

                goto retry;
        }

        /* Tell the caller to redo the mode computation with the new bpp. */
        if (needs_recompute)
                return RETRY;

        return ret;
}
7840
7841 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
7842 {
7843         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7844         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7845
7846         /* IPS only exists on ULT machines and is tied to pipe A. */
7847         if (!hsw_crtc_supports_ips(crtc))
7848                 return false;
7849
7850         if (!i915_modparams.enable_ips)
7851                 return false;
7852
7853         if (crtc_state->pipe_bpp > 24)
7854                 return false;
7855
7856         /*
7857          * We compare against max which means we must take
7858          * the increased cdclk requirement into account when
7859          * calculating the new cdclk.
7860          *
7861          * Should measure whether using a lower cdclk w/o IPS
7862          */
7863         if (IS_BROADWELL(dev_priv) &&
7864             crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
7865                 return false;
7866
7867         return true;
7868 }
7869
/*
 * Decide whether to enable IPS for this crtc state and record the
 * result in crtc_state->ips_enabled.
 *
 * Returns 0, or a negative error code from acquiring the cdclk state.
 */
static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(crtc_state->uapi.crtc->dev);
        struct intel_atomic_state *state =
                to_intel_atomic_state(crtc_state->uapi.state);

        crtc_state->ips_enabled = false;

        if (!hsw_crtc_state_ips_capable(crtc_state))
                return 0;

        /*
         * When IPS gets enabled, the pipe CRC changes. Since IPS gets
         * enabled and disabled dynamically based on package C states,
         * user space can't make reliable use of the CRCs, so let's just
         * completely disable it.
         */
        if (crtc_state->crc_enabled)
                return 0;

        /* IPS should be fine as long as at least one plane is enabled. */
        if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
                return 0;

        if (IS_BROADWELL(dev_priv)) {
                const struct intel_cdclk_state *cdclk_state;

                cdclk_state = intel_atomic_get_cdclk_state(state);
                if (IS_ERR(cdclk_state))
                        return PTR_ERR(cdclk_state);

                /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
                if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
                        return 0;
        }

        crtc_state->ips_enabled = true;

        return 0;
}
7911
7912 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7913 {
7914         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7915
7916         /* GDG double wide on either pipe, otherwise pipe A only */
7917         return INTEL_GEN(dev_priv) < 4 &&
7918                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
7919 }
7920
7921 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
7922 {
7923         u32 pixel_rate;
7924
7925         pixel_rate = pipe_config->hw.adjusted_mode.crtc_clock;
7926
7927         /*
7928          * We only use IF-ID interlacing. If we ever use
7929          * PF-ID we'll need to adjust the pixel_rate here.
7930          */
7931
7932         if (pipe_config->pch_pfit.enabled) {
7933                 u64 pipe_w, pipe_h, pfit_w, pfit_h;
7934                 u32 pfit_size = pipe_config->pch_pfit.size;
7935
7936                 pipe_w = pipe_config->pipe_src_w;
7937                 pipe_h = pipe_config->pipe_src_h;
7938
7939                 pfit_w = (pfit_size >> 16) & 0xFFFF;
7940                 pfit_h = pfit_size & 0xFFFF;
7941                 if (pipe_w < pfit_w)
7942                         pipe_w = pfit_w;
7943                 if (pipe_h < pfit_h)
7944                         pipe_h = pfit_h;
7945
7946                 if (WARN_ON(!pfit_w || !pfit_h))
7947                         return pixel_rate;
7948
7949                 pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
7950                                      pfit_w * pfit_h);
7951         }
7952
7953         return pixel_rate;
7954 }
7955
7956 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
7957 {
7958         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
7959
7960         if (HAS_GMCH(dev_priv))
7961                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
7962                 crtc_state->pixel_rate =
7963                         crtc_state->hw.adjusted_mode.crtc_clock;
7964         else
7965                 crtc_state->pixel_rate =
7966                         ilk_pipe_pixel_rate(crtc_state);
7967 }
7968
/*
 * Validate and refine the crtc config: dotclock limits (with double
 * wide handling on gen3-), YCbCr/CTM exclusion, even-width constraints,
 * the Cantiga+ hsync workaround, pixel rate, and FDI config for PCH
 * encoders.
 *
 * Returns 0, RETRY from the FDI code, or a negative error code.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
        int clock_limit = dev_priv->max_dotclk_freq;

        if (INTEL_GEN(dev_priv) < 4) {
                /* Single wide is limited to 90% of cdclk on gen3-. */
                clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

                /*
                 * Enable double wide mode when the dot clock
                 * is > 90% of the (display) core speed.
                 */
                if (intel_crtc_supports_double_wide(crtc) &&
                    adjusted_mode->crtc_clock > clock_limit) {
                        clock_limit = dev_priv->max_dotclk_freq;
                        pipe_config->double_wide = true;
                }
        }

        if (adjusted_mode->crtc_clock > clock_limit) {
                drm_dbg_kms(&dev_priv->drm,
                            "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
                            adjusted_mode->crtc_clock, clock_limit,
                            yesno(pipe_config->double_wide));
                return -EINVAL;
        }

        if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
             pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
             pipe_config->hw.ctm) {
                /*
                 * There is only one pipe CSC unit per pipe, and we need that
                 * for output conversion from RGB->YCBCR. So if CTM is already
                 * applied we can't support YCBCR420 output.
                 */
                drm_dbg_kms(&dev_priv->drm,
                            "YCBCR420 and CTM together are not possible\n");
                return -EINVAL;
        }

        /*
         * Pipe horizontal size must be even in:
         * - DVO ganged mode
         * - LVDS dual channel mode
         * - Double wide pipe
         */
        if (pipe_config->pipe_src_w & 1) {
                if (pipe_config->double_wide) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "Odd pipe source width not supported with double wide pipe\n");
                        return -EINVAL;
                }

                if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
                    intel_is_dual_link_lvds(dev_priv)) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "Odd pipe source width not supported with dual link LVDS\n");
                        return -EINVAL;
                }
        }

        /* Cantiga+ cannot handle modes with a hsync front porch of 0.
         * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
         */
        if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
                adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
                return -EINVAL;

        intel_crtc_compute_pixel_rate(pipe_config);

        if (pipe_config->has_pch_encoder)
                return ilk_fdi_compute_config(crtc, pipe_config);

        return 0;
}
8046
8047 static void
8048 intel_reduce_m_n_ratio(u32 *num, u32 *den)
8049 {
8050         while (*num > DATA_LINK_M_N_MASK ||
8051                *den > DATA_LINK_M_N_MASK) {
8052                 *num >>= 1;
8053                 *den >>= 1;
8054         }
8055 }
8056
8057 static void compute_m_n(unsigned int m, unsigned int n,
8058                         u32 *ret_m, u32 *ret_n,
8059                         bool constant_n)
8060 {
8061         /*
8062          * Several DP dongles in particular seem to be fussy about
8063          * too large link M/N values. Give N value as 0x8000 that
8064          * should be acceptable by specific devices. 0x8000 is the
8065          * specified fixed N value for asynchronous clock mode,
8066          * which the devices expect also in synchronous clock mode.
8067          */
8068         if (constant_n)
8069                 *ret_n = 0x8000;
8070         else
8071                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
8072
8073         *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
8074         intel_reduce_m_n_ratio(ret_m, ret_n);
8075 }
8076
8077 void
8078 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
8079                        int pixel_clock, int link_clock,
8080                        struct intel_link_m_n *m_n,
8081                        bool constant_n, bool fec_enable)
8082 {
8083         u32 data_clock = bits_per_pixel * pixel_clock;
8084
8085         if (fec_enable)
8086                 data_clock = intel_dp_mode_to_fec_clock(data_clock);
8087
8088         m_n->tu = 64;
8089         compute_m_n(data_clock,
8090                     link_clock * nlanes * 8,
8091                     &m_n->gmch_m, &m_n->gmch_n,
8092                     constant_n);
8093
8094         compute_m_n(pixel_clock, link_clock,
8095                     &m_n->link_m, &m_n->link_n,
8096                     constant_n);
8097 }
8098
/*
 * Reconcile the VBT's LVDS SSC setting with what the BIOS actually
 * programmed into PCH_DREF_CONTROL, preferring the BIOS state.
 */
static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
        /*
         * There may be no VBT; and if the BIOS enabled SSC we can
         * just keep using it to avoid unnecessary flicker.  Whereas if the
         * BIOS isn't using it, don't assume it will work even if the VBT
         * indicates as much.
         */
        if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
                bool bios_lvds_use_ssc = intel_de_read(dev_priv,
                                                       PCH_DREF_CONTROL) &
                        DREF_SSC1_ENABLE;

                if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "SSC %s by BIOS, overriding VBT which says %s\n",
                                    enableddisabled(bios_lvds_use_ssc),
                                    enableddisabled(dev_priv->vbt.lvds_use_ssc));
                        dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
                }
        }
}
8121
8122 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
8123 {
8124         if (i915_modparams.panel_use_ssc >= 0)
8125                 return i915_modparams.panel_use_ssc != 0;
8126         return dev_priv->vbt.lvds_use_ssc
8127                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
8128 }
8129
8130 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
8131 {
8132         return (1 << dpll->n) << 16 | dpll->m2;
8133 }
8134
8135 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
8136 {
8137         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
8138 }
8139
8140 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
8141                                      struct intel_crtc_state *crtc_state,
8142                                      struct dpll *reduced_clock)
8143 {
8144         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8145         u32 fp, fp2 = 0;
8146
8147         if (IS_PINEVIEW(dev_priv)) {
8148                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
8149                 if (reduced_clock)
8150                         fp2 = pnv_dpll_compute_fp(reduced_clock);
8151         } else {
8152                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
8153                 if (reduced_clock)
8154                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
8155         }
8156
8157         crtc_state->dpll_hw_state.fp0 = fp;
8158
8159         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8160             reduced_clock) {
8161                 crtc_state->dpll_hw_state.fp1 = fp2;
8162         } else {
8163                 crtc_state->dpll_hw_state.fp1 = fp;
8164         }
8165 }
8166
/*
 * Work around PLL B opamp miscalibration on VLV by forcing a sane
 * opamp value via DPIO before the PLL is enabled.
 * NOTE(review): register sequence and ordering come from hw docs;
 * do not reorder.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
                pipe)
{
        u32 reg_val;

        /*
         * PLLB opamp always calibrates to max value of 0x3f, force enable it
         * and set it to a reasonable value instead.
         */
        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
        reg_val &= 0xffffff00;
        reg_val |= 0x00000030;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
        reg_val &= 0x00ffffff;
        reg_val |= 0x8c000000;
        vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

        /* Clear the forced opamp bits again once calibration is set up. */
        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
        reg_val &= 0xffffff00;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
        reg_val &= 0x00ffffff;
        reg_val |= 0xb0000000;
        vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
8195
/* Program the PCH transcoder data/link M1/N1 registers for this pipe. */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
                                         const struct intel_link_m_n *m_n)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* TU size shares the data M register with the M value. */
        intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
                       TU_SIZE(m_n->tu) | m_n->gmch_m);
        intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
        intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
        intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
8209
8210 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
8211                                  enum transcoder transcoder)
8212 {
8213         if (IS_HASWELL(dev_priv))
8214                 return transcoder == TRANSCODER_EDP;
8215
8216         /*
8217          * Strictly speaking some registers are available before
8218          * gen7, but we only support DRRS on gen7+
8219          */
8220         return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
8221 }
8222
/*
 * Program the CPU transcoder data/link M/N registers. On gen5+ the
 * registers are per-transcoder; on gen4- they are the per-pipe G4X
 * variants. @m2_n2 (optional) is written only when DRRS is in use and
 * the transcoder has M2/N2 registers.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
                                         const struct intel_link_m_n *m_n,
                                         const struct intel_link_m_n *m2_n2)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        enum transcoder transcoder = crtc_state->cpu_transcoder;

        if (INTEL_GEN(dev_priv) >= 5) {
                intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
                               TU_SIZE(m_n->tu) | m_n->gmch_m);
                intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
                               m_n->gmch_n);
                intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
                               m_n->link_m);
                intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
                               m_n->link_n);
                /*
                 *  M2_N2 registers are set only if DRRS is supported
                 * (to make sure the registers are not unnecessarily accessed).
                 */
                if (m2_n2 && crtc_state->has_drrs &&
                    transcoder_has_m2_n2(dev_priv, transcoder)) {
                        intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
                                       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
                        intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
                                       m2_n2->gmch_n);
                        intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
                                       m2_n2->link_m);
                        intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
                                       m2_n2->link_n);
                }
        } else {
                /* Gen4-: per-pipe registers instead of per-transcoder. */
                intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
                               TU_SIZE(m_n->tu) | m_n->gmch_m);
                intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
                intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
                intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
        }
}
8264
/*
 * Program the DP M/N values selected by @m_n (M1_N1 or M2_N2) into the
 * appropriate transcoder registers.
 */
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
        const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
        struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);

        if (m_n == M1_N1) {
                dp_m_n = &crtc_state->dp_m_n;
                dp_m2_n2 = &crtc_state->dp_m2_n2;
        } else if (m_n == M2_N2) {

                /*
                 * M2_N2 registers are not supported. Hence m2_n2 divider value
                 * needs to be programmed into M1_N1.
                 */
                dp_m_n = &crtc_state->dp_m2_n2;
        } else {
                drm_err(&i915->drm, "Unsupported divider value\n");
                return;
        }

        /*
         * NOTE(review): the PCH path always writes dp_m_n, ignoring the
         * M2_N2 selection — presumably DRRS never takes this path;
         * confirm against callers.
         */
        if (crtc_state->has_pch_encoder)
                intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
        else
                intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
8290
8291 static void vlv_compute_dpll(struct intel_crtc *crtc,
8292                              struct intel_crtc_state *pipe_config)
8293 {
8294         pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
8295                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
8296         if (crtc->pipe != PIPE_A)
8297                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
8298
8299         /* DPLL not used with DSI, but still need the rest set up */
8300         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
8301                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
8302                         DPLL_EXT_BUFFER_ENABLE_VLV;
8303
8304         pipe_config->dpll_hw_state.dpll_md =
8305                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
8306 }
8307
8308 static void chv_compute_dpll(struct intel_crtc *crtc,
8309                              struct intel_crtc_state *pipe_config)
8310 {
8311         pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
8312                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
8313         if (crtc->pipe != PIPE_A)
8314                 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
8315
8316         /* DPLL not used with DSI, but still need the rest set up */
8317         if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
8318                 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
8319
8320         pipe_config->dpll_hw_state.dpll_md =
8321                 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
8322 }
8323
/*
 * vlv_prepare_pll - program the VLV PLL dividers/filters via DPIO sideband
 *
 * Writes the per-pipe divider, loop-filter and clock-source settings through
 * the DPIO sideband before the DPLL itself is enabled. The magic constants
 * come from the eDP/HDMI DPIO driver vbios notes doc referenced below; the
 * write ordering follows that sequence and should not be rearranged.
 * Bails out early (after only enabling the refclk) for DSI, which doesn't
 * use this DPLL.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	intel_de_write(dev_priv, DPLL(pipe),
		       pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* Grab the DPIO sideband; released via vlv_dpio_put() below */
	vlv_dpio_get(dev_priv);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL (clear the top byte of PLL_DW8) */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Dividers written first, then the same value again with calibration enabled */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients (162000 kHz == DP RBR link rate) */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/*
	 * Core clock setup: keep bits 8-15 from the current value, set the
	 * rest per the vbios notes; 0x01000000 presumably selects the DP
	 * core clock source - register layout not documented here.
	 */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(pipe_config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);

	vlv_dpio_put(dev_priv);
}
8422
/*
 * chv_prepare_pll - program the CHV PLL dividers/filters via DPIO sideband
 *
 * CHV counterpart of vlv_prepare_pll(): programs the p1/p2/k dividers, the
 * fractional m2 feedback divider, the digital lock detect threshold and the
 * VCO-dependent loop filter through the DPIO sideband, then kicks off AFC
 * recalibration. The write sequence is hardware-mandated; do not reorder.
 * Bails out early (after only enabling refclk and SSC) for DSI.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	intel_de_write(dev_priv, DPLL(pipe),
		       pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* dpll.m2 carries a 22-bit fraction below the integer part */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	/* Grab the DPIO sideband; released via vlv_dpio_put() below */
	vlv_dpio_get(dev_priv);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable (only when a fraction is present) */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold (coarse mode w/o fraction) */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter: coefficients and tribuf count depend on the VCO range */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	vlv_dpio_put(dev_priv);
}
8527
8528 /**
8529  * vlv_force_pll_on - forcibly enable just the PLL
8530  * @dev_priv: i915 private structure
8531  * @pipe: pipe PLL to enable
8532  * @dpll: PLL configuration
8533  *
8534  * Enable the PLL for @pipe using the supplied @dpll config. To be used
8535  * in cases where we need the PLL enabled even when @pipe is not going to
8536  * be enabled.
8537  */
8538 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
8539                      const struct dpll *dpll)
8540 {
8541         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
8542         struct intel_crtc_state *pipe_config;
8543
8544         pipe_config = intel_crtc_state_alloc(crtc);
8545         if (!pipe_config)
8546                 return -ENOMEM;
8547
8548         pipe_config->cpu_transcoder = (enum transcoder)pipe;
8549         pipe_config->pixel_multiplier = 1;
8550         pipe_config->dpll = *dpll;
8551
8552         if (IS_CHERRYVIEW(dev_priv)) {
8553                 chv_compute_dpll(crtc, pipe_config);
8554                 chv_prepare_pll(crtc, pipe_config);
8555                 chv_enable_pll(crtc, pipe_config);
8556         } else {
8557                 vlv_compute_dpll(crtc, pipe_config);
8558                 vlv_prepare_pll(crtc, pipe_config);
8559                 vlv_enable_pll(crtc, pipe_config);
8560         }
8561
8562         kfree(pipe_config);
8563
8564         return 0;
8565 }
8566
8567 /**
8568  * vlv_force_pll_off - forcibly disable just the PLL
8569  * @dev_priv: i915 private structure
8570  * @pipe: pipe PLL to disable
8571  *
8572  * Disable the PLL for @pipe. To be used in cases where we need
8573  * the PLL enabled even when @pipe is not going to be enabled.
8574  */
8575 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
8576 {
8577         if (IS_CHERRYVIEW(dev_priv))
8578                 chv_disable_pll(dev_priv, pipe);
8579         else
8580                 vlv_disable_pll(dev_priv, pipe);
8581 }
8582
8583 static void i9xx_compute_dpll(struct intel_crtc *crtc,
8584                               struct intel_crtc_state *crtc_state,
8585                               struct dpll *reduced_clock)
8586 {
8587         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8588         u32 dpll;
8589         struct dpll *clock = &crtc_state->dpll;
8590
8591         i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
8592
8593         dpll = DPLL_VGA_MODE_DIS;
8594
8595         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
8596                 dpll |= DPLLB_MODE_LVDS;
8597         else
8598                 dpll |= DPLLB_MODE_DAC_SERIAL;
8599
8600         if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
8601             IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
8602                 dpll |= (crtc_state->pixel_multiplier - 1)
8603                         << SDVO_MULTIPLIER_SHIFT_HIRES;
8604         }
8605
8606         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
8607             intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
8608                 dpll |= DPLL_SDVO_HIGH_SPEED;
8609
8610         if (intel_crtc_has_dp_encoder(crtc_state))
8611                 dpll |= DPLL_SDVO_HIGH_SPEED;
8612
8613         /* compute bitmask from p1 value */
8614         if (IS_PINEVIEW(dev_priv))
8615                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
8616         else {
8617                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8618                 if (IS_G4X(dev_priv) && reduced_clock)
8619                         dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
8620         }
8621         switch (clock->p2) {
8622         case 5:
8623                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
8624                 break;
8625         case 7:
8626                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
8627                 break;
8628         case 10:
8629                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
8630                 break;
8631         case 14:
8632                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
8633                 break;
8634         }
8635         if (INTEL_GEN(dev_priv) >= 4)
8636                 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
8637
8638         if (crtc_state->sdvo_tv_clock)
8639                 dpll |= PLL_REF_INPUT_TVCLKINBC;
8640         else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8641                  intel_panel_use_ssc(dev_priv))
8642                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8643         else
8644                 dpll |= PLL_REF_INPUT_DREFCLK;
8645
8646         dpll |= DPLL_VCO_ENABLE;
8647         crtc_state->dpll_hw_state.dpll = dpll;
8648
8649         if (INTEL_GEN(dev_priv) >= 4) {
8650                 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
8651                         << DPLL_MD_UDI_MULTIPLIER_SHIFT;
8652                 crtc_state->dpll_hw_state.dpll_md = dpll_md;
8653         }
8654 }
8655
8656 static void i8xx_compute_dpll(struct intel_crtc *crtc,
8657                               struct intel_crtc_state *crtc_state,
8658                               struct dpll *reduced_clock)
8659 {
8660         struct drm_device *dev = crtc->base.dev;
8661         struct drm_i915_private *dev_priv = to_i915(dev);
8662         u32 dpll;
8663         struct dpll *clock = &crtc_state->dpll;
8664
8665         i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
8666
8667         dpll = DPLL_VGA_MODE_DIS;
8668
8669         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8670                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8671         } else {
8672                 if (clock->p1 == 2)
8673                         dpll |= PLL_P1_DIVIDE_BY_TWO;
8674                 else
8675                         dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8676                 if (clock->p2 == 4)
8677                         dpll |= PLL_P2_DIVIDE_BY_4;
8678         }
8679
8680         /*
8681          * Bspec:
8682          * "[Almador Errata}: For the correct operation of the muxed DVO pins
8683          *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
8684          *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
8685          *  Enable) must be set to “1” in both the DPLL A Control Register
8686          *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
8687          *
8688          * For simplicity We simply keep both bits always enabled in
8689          * both DPLLS. The spec says we should disable the DVO 2X clock
8690          * when not needed, but this seems to work fine in practice.
8691          */
8692         if (IS_I830(dev_priv) ||
8693             intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
8694                 dpll |= DPLL_DVO_2X_MODE;
8695
8696         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8697             intel_panel_use_ssc(dev_priv))
8698                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8699         else
8700                 dpll |= PLL_REF_INPUT_DREFCLK;
8701
8702         dpll |= DPLL_VCO_ENABLE;
8703         crtc_state->dpll_hw_state.dpll = dpll;
8704 }
8705
/*
 * intel_set_pipe_timings - program the transcoder timing registers
 * (HTOTAL/HBLANK/HSYNC, VTOTAL/VBLANK/VSYNC, VSYNCSHIFT) from the
 * adjusted mode in @crtc_state. Hardware expects <value - 1> encodings,
 * hence all the "- 1"s below. Interlaced modes need vtotal/vblank_end
 * adjusted and a vsync shift programmed.
 */
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		/* SDVO uses a different vsync shift than other outputs */
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/* VSYNCSHIFT only exists on gen4+ */
	if (INTEL_GEN(dev_priv) > 3)
		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
			       vsyncshift);

	/* Low 16 bits: start/active - 1; high 16 bits: end/total - 1 */
	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));

	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, VTOTAL(pipe),
			       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));

}
8763
8764 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
8765 {
8766         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8767         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8768         enum pipe pipe = crtc->pipe;
8769
8770         /* pipesrc controls the size that is scaled from, which should
8771          * always be the user's requested size.
8772          */
8773         intel_de_write(dev_priv, PIPESRC(pipe),
8774                        ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
8775 }
8776
8777 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
8778 {
8779         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
8780         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8781
8782         if (IS_GEN(dev_priv, 2))
8783                 return false;
8784
8785         if (INTEL_GEN(dev_priv) >= 9 ||
8786             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8787                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
8788         else
8789                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
8790 }
8791
8792 static void intel_get_pipe_timings(struct intel_crtc *crtc,
8793                                    struct intel_crtc_state *pipe_config)
8794 {
8795         struct drm_device *dev = crtc->base.dev;
8796         struct drm_i915_private *dev_priv = to_i915(dev);
8797         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
8798         u32 tmp;
8799
8800         tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
8801         pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
8802         pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
8803
8804         if (!transcoder_is_dsi(cpu_transcoder)) {
8805                 tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
8806                 pipe_config->hw.adjusted_mode.crtc_hblank_start =
8807                                                         (tmp & 0xffff) + 1;
8808                 pipe_config->hw.adjusted_mode.crtc_hblank_end =
8809                                                 ((tmp >> 16) & 0xffff) + 1;
8810         }
8811         tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
8812         pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
8813         pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
8814
8815         tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
8816         pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
8817         pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
8818
8819         if (!transcoder_is_dsi(cpu_transcoder)) {
8820                 tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
8821                 pipe_config->hw.adjusted_mode.crtc_vblank_start =
8822                                                         (tmp & 0xffff) + 1;
8823                 pipe_config->hw.adjusted_mode.crtc_vblank_end =
8824                                                 ((tmp >> 16) & 0xffff) + 1;
8825         }
8826         tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
8827         pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
8828         pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
8829
8830         if (intel_pipe_is_interlaced(pipe_config)) {
8831                 pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
8832                 pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
8833                 pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
8834         }
8835 }
8836
8837 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8838                                     struct intel_crtc_state *pipe_config)
8839 {
8840         struct drm_device *dev = crtc->base.dev;
8841         struct drm_i915_private *dev_priv = to_i915(dev);
8842         u32 tmp;
8843
8844         tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
8845         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8846         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8847
8848         pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h;
8849         pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w;
8850 }
8851
8852 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
8853                                  struct intel_crtc_state *pipe_config)
8854 {
8855         mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay;
8856         mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal;
8857         mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start;
8858         mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end;
8859
8860         mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay;
8861         mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal;
8862         mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start;
8863         mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end;
8864
8865         mode->flags = pipe_config->hw.adjusted_mode.flags;
8866         mode->type = DRM_MODE_TYPE_DRIVER;
8867
8868         mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
8869
8870         mode->hsync = drm_mode_hsync(mode);
8871         mode->vrefresh = drm_mode_vrefresh(mode);
8872         drm_mode_set_name(mode);
8873 }
8874
/*
 * i9xx_set_pipeconf - assemble and write the PIPECONF register for
 * gen2-gen4/VLV/CHV style pipes: enable bit carry-over (830), double wide,
 * dither/bpc, interlace mode, color range, gamma mode and frame start delay.
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* gen3- and SDVO only support the field-indication interlace mode */
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
	}

	/* limited (16-235) color range is only selectable on VLV/CHV here */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	pipeconf |= PIPECONF_FRAME_START_DELAY(0);

	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
	/* posting read to flush the write before subsequent pipe programming */
	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
8935
8936 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8937                                    struct intel_crtc_state *crtc_state)
8938 {
8939         struct drm_device *dev = crtc->base.dev;
8940         struct drm_i915_private *dev_priv = to_i915(dev);
8941         const struct intel_limit *limit;
8942         int refclk = 48000;
8943
8944         memset(&crtc_state->dpll_hw_state, 0,
8945                sizeof(crtc_state->dpll_hw_state));
8946
8947         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8948                 if (intel_panel_use_ssc(dev_priv)) {
8949                         refclk = dev_priv->vbt.lvds_ssc_freq;
8950                         drm_dbg_kms(&dev_priv->drm,
8951                                     "using SSC reference clock of %d kHz\n",
8952                                     refclk);
8953                 }
8954
8955                 limit = &intel_limits_i8xx_lvds;
8956         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
8957                 limit = &intel_limits_i8xx_dvo;
8958         } else {
8959                 limit = &intel_limits_i8xx_dac;
8960         }
8961
8962         if (!crtc_state->clock_set &&
8963             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8964                                  refclk, NULL, &crtc_state->dpll)) {
8965                 drm_err(&dev_priv->drm,
8966                         "Couldn't find PLL settings for mode!\n");
8967                 return -EINVAL;
8968         }
8969
8970         i8xx_compute_dpll(crtc, crtc_state, NULL);
8971
8972         return 0;
8973 }
8974
8975 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
8976                                   struct intel_crtc_state *crtc_state)
8977 {
8978         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8979         const struct intel_limit *limit;
8980         int refclk = 96000;
8981
8982         memset(&crtc_state->dpll_hw_state, 0,
8983                sizeof(crtc_state->dpll_hw_state));
8984
8985         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8986                 if (intel_panel_use_ssc(dev_priv)) {
8987                         refclk = dev_priv->vbt.lvds_ssc_freq;
8988                         drm_dbg_kms(&dev_priv->drm,
8989                                     "using SSC reference clock of %d kHz\n",
8990                                     refclk);
8991                 }
8992
8993                 if (intel_is_dual_link_lvds(dev_priv))
8994                         limit = &intel_limits_g4x_dual_channel_lvds;
8995                 else
8996                         limit = &intel_limits_g4x_single_channel_lvds;
8997         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
8998                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
8999                 limit = &intel_limits_g4x_hdmi;
9000         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
9001                 limit = &intel_limits_g4x_sdvo;
9002         } else {
9003                 /* The option is for other outputs */
9004                 limit = &intel_limits_i9xx_sdvo;
9005         }
9006
9007         if (!crtc_state->clock_set &&
9008             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9009                                 refclk, NULL, &crtc_state->dpll)) {
9010                 drm_err(&dev_priv->drm,
9011                         "Couldn't find PLL settings for mode!\n");
9012                 return -EINVAL;
9013         }
9014
9015         i9xx_compute_dpll(crtc, crtc_state, NULL);
9016
9017         return 0;
9018 }
9019
9020 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
9021                                   struct intel_crtc_state *crtc_state)
9022 {
9023         struct drm_device *dev = crtc->base.dev;
9024         struct drm_i915_private *dev_priv = to_i915(dev);
9025         const struct intel_limit *limit;
9026         int refclk = 96000;
9027
9028         memset(&crtc_state->dpll_hw_state, 0,
9029                sizeof(crtc_state->dpll_hw_state));
9030
9031         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9032                 if (intel_panel_use_ssc(dev_priv)) {
9033                         refclk = dev_priv->vbt.lvds_ssc_freq;
9034                         drm_dbg_kms(&dev_priv->drm,
9035                                     "using SSC reference clock of %d kHz\n",
9036                                     refclk);
9037                 }
9038
9039                 limit = &pnv_limits_lvds;
9040         } else {
9041                 limit = &pnv_limits_sdvo;
9042         }
9043
9044         if (!crtc_state->clock_set &&
9045             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9046                                 refclk, NULL, &crtc_state->dpll)) {
9047                 drm_err(&dev_priv->drm,
9048                         "Couldn't find PLL settings for mode!\n");
9049                 return -EINVAL;
9050         }
9051
9052         i9xx_compute_dpll(crtc, crtc_state, NULL);
9053
9054         return 0;
9055 }
9056
9057 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
9058                                    struct intel_crtc_state *crtc_state)
9059 {
9060         struct drm_device *dev = crtc->base.dev;
9061         struct drm_i915_private *dev_priv = to_i915(dev);
9062         const struct intel_limit *limit;
9063         int refclk = 96000;
9064
9065         memset(&crtc_state->dpll_hw_state, 0,
9066                sizeof(crtc_state->dpll_hw_state));
9067
9068         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9069                 if (intel_panel_use_ssc(dev_priv)) {
9070                         refclk = dev_priv->vbt.lvds_ssc_freq;
9071                         drm_dbg_kms(&dev_priv->drm,
9072                                     "using SSC reference clock of %d kHz\n",
9073                                     refclk);
9074                 }
9075
9076                 limit = &intel_limits_i9xx_lvds;
9077         } else {
9078                 limit = &intel_limits_i9xx_sdvo;
9079         }
9080
9081         if (!crtc_state->clock_set &&
9082             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9083                                  refclk, NULL, &crtc_state->dpll)) {
9084                 drm_err(&dev_priv->drm,
9085                         "Couldn't find PLL settings for mode!\n");
9086                 return -EINVAL;
9087         }
9088
9089         i9xx_compute_dpll(crtc, crtc_state, NULL);
9090
9091         return 0;
9092 }
9093
9094 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
9095                                   struct intel_crtc_state *crtc_state)
9096 {
9097         int refclk = 100000;
9098         const struct intel_limit *limit = &intel_limits_chv;
9099         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
9100
9101         memset(&crtc_state->dpll_hw_state, 0,
9102                sizeof(crtc_state->dpll_hw_state));
9103
9104         if (!crtc_state->clock_set &&
9105             !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9106                                 refclk, NULL, &crtc_state->dpll)) {
9107                 drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
9108                 return -EINVAL;
9109         }
9110
9111         chv_compute_dpll(crtc, crtc_state);
9112
9113         return 0;
9114 }
9115
9116 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
9117                                   struct intel_crtc_state *crtc_state)
9118 {
9119         int refclk = 100000;
9120         const struct intel_limit *limit = &intel_limits_vlv;
9121         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
9122
9123         memset(&crtc_state->dpll_hw_state, 0,
9124                sizeof(crtc_state->dpll_hw_state));
9125
9126         if (!crtc_state->clock_set &&
9127             !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9128                                 refclk, NULL, &crtc_state->dpll)) {
9129                 drm_err(&i915->drm,  "Couldn't find PLL settings for mode!\n");
9130                 return -EINVAL;
9131         }
9132
9133         vlv_compute_dpll(crtc, crtc_state);
9134
9135         return 0;
9136 }
9137
9138 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
9139 {
9140         if (IS_I830(dev_priv))
9141                 return false;
9142
9143         return INTEL_GEN(dev_priv) >= 4 ||
9144                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
9145 }
9146
/*
 * Read out the panel fitter (pfit) configuration for @crtc into
 * @pipe_config->gmch_pfit.  Does nothing if the platform has no pfit,
 * the pfit is disabled, or it is attached to a different pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        u32 tmp;

        if (!i9xx_has_pfit(dev_priv))
                return;

        tmp = intel_de_read(dev_priv, PFIT_CONTROL);
        if (!(tmp & PFIT_ENABLE))
                return;

        /* Check whether the pfit is attached to our pipe. */
        if (INTEL_GEN(dev_priv) < 4) {
                /* Pre-gen4: only pipe B is accepted here. */
                if (crtc->pipe != PIPE_B)
                        return;
        } else {
                /* Gen4+: pfit control carries the attached pipe. */
                if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
                        return;
        }

        pipe_config->gmch_pfit.control = tmp;
        pipe_config->gmch_pfit.pgm_ratios = intel_de_read(dev_priv,
                                                          PFIT_PGM_RATIOS);
}
9173
/*
 * Read back the VLV DPLL dividers via sideband (DPIO) and compute the
 * resulting port clock into @pipe_config->port_clock.  No-op when the
 * DPLL is not enabled (e.g. DSI).
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        struct dpll clock;
        u32 mdiv;
        int refclk = 100000; /* reference clock in kHz */

        /* In case of DSI, DPLL will not be used */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        vlv_dpio_get(dev_priv);
        mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
        vlv_dpio_put(dev_priv);

        /* Unpack the divider fields from PLL_DW3. */
        clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
        clock.m2 = mdiv & DPIO_M2DIV_MASK;
        clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
        clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
        clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

        pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
9200
/*
 * Read back the current primary plane hardware state (tiling,
 * rotation, pixel format, base address, pitch and size) into
 * @plane_config.  On success an intel_framebuffer is allocated and
 * stored in plane_config->fb; on failure (plane disabled or
 * allocation failure) @plane_config is left without an fb.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *plane = to_intel_plane(crtc->base.primary);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        enum pipe pipe;
        u32 val, base, offset;
        int fourcc, pixel_format;
        unsigned int aligned_height;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;

        /* Nothing to read out if the plane is disabled. */
        if (!plane->get_hw_state(plane, &pipe))
                return;

        WARN_ON(pipe != crtc->pipe);

        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
        if (!intel_fb) {
                drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
                return;
        }

        fb = &intel_fb->base;

        fb->dev = dev;

        val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

        /* Tiling and 180° rotation bits exist from gen4 onwards. */
        if (INTEL_GEN(dev_priv) >= 4) {
                if (val & DISPPLANE_TILED) {
                        plane_config->tiling = I915_TILING_X;
                        fb->modifier = I915_FORMAT_MOD_X_TILED;
                }

                if (val & DISPPLANE_ROTATE_180)
                        plane_config->rotation = DRM_MODE_ROTATE_180;
        }

        /* CHV pipe B additionally supports horizontal mirroring. */
        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
            val & DISPPLANE_MIRROR)
                plane_config->rotation |= DRM_MODE_REFLECT_X;

        pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
        fourcc = i9xx_format_to_fourcc(pixel_format);
        fb->format = drm_format_info(fourcc);

        /* The surface base/offset register layout is per-platform. */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane));
                base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                if (plane_config->tiling)
                        offset = intel_de_read(dev_priv,
                                               DSPTILEOFF(i9xx_plane));
                else
                        offset = intel_de_read(dev_priv,
                                               DSPLINOFF(i9xx_plane));
                base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
        } else {
                base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
        }
        plane_config->base = base;

        /* Fb size follows the pipe source size; registers hold size-1. */
        val = intel_de_read(dev_priv, PIPESRC(pipe));
        fb->width = ((val >> 16) & 0xfff) + 1;
        fb->height = ((val >> 0) & 0xfff) + 1;

        val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane));
        fb->pitches[0] = val & 0xffffffc0;

        aligned_height = intel_fb_align_height(fb, 0, fb->height);

        plane_config->size = fb->pitches[0] * aligned_height;

        drm_dbg_kms(&dev_priv->drm,
                    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                    crtc->base.name, plane->base.name, fb->width, fb->height,
                    fb->format->cpp[0] * 8, base, fb->pitches[0],
                    plane_config->size);

        plane_config->fb = intel_fb;
}
9286
/*
 * Read back the CHV DPLL dividers via sideband (DPIO) and compute the
 * resulting port clock into @pipe_config->port_clock.  No-op when the
 * DPLL is not enabled (e.g. DSI).
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        struct dpll clock;
        u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
        int refclk = 100000; /* reference clock in kHz */

        /* In case of DSI, DPLL will not be used */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        vlv_dpio_get(dev_priv);
        cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
        pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
        pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
        pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
        pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
        vlv_dpio_put(dev_priv);

        /* m2 is a 22.22 fixed-point value: integer part from PLL_DW0. */
        clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
        clock.m2 = (pll_dw0 & 0xff) << 22;
        /* Fractional part only counts when the fractional divider is on. */
        if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
                clock.m2 |= pll_dw2 & 0x3fffff;
        clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
        clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
        clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

        pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
9320
9321 static enum intel_output_format
9322 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
9323 {
9324         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9325         u32 tmp;
9326
9327         tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
9328
9329         if (tmp & PIPEMISC_YUV420_ENABLE) {
9330                 /* We support 4:2:0 in full blend mode only */
9331                 WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
9332
9333                 return INTEL_OUTPUT_FORMAT_YCBCR420;
9334         } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
9335                 return INTEL_OUTPUT_FORMAT_YCBCR444;
9336         } else {
9337                 return INTEL_OUTPUT_FORMAT_RGB;
9338         }
9339 }
9340
9341 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
9342 {
9343         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9344         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9345         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9346         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
9347         u32 tmp;
9348
9349         tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
9350
9351         if (tmp & DISPPLANE_GAMMA_ENABLE)
9352                 crtc_state->gamma_enable = true;
9353
9354         if (!HAS_GMCH(dev_priv) &&
9355             tmp & DISPPLANE_PIPE_CSC_ENABLE)
9356                 crtc_state->csc_enable = true;
9357 }
9358
/*
 * Read out the full hardware state of @crtc into @pipe_config.
 * Returns true when the pipe is enabled and the config was read out,
 * false when the pipe's power domain is off or the pipe is disabled.
 * Holds a display power wakeref for the duration of the readout.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;
        u32 tmp;
        bool ret;

        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wakeref)
                return false;

        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = NULL;
        pipe_config->master_transcoder = INVALID_TRANSCODER;

        ret = false;

        tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
        if (!(tmp & PIPECONF_ENABLE))
                goto out;

        /* The BPC field is only decoded on G4X/VLV/CHV. */
        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
            IS_CHERRYVIEW(dev_priv)) {
                switch (tmp & PIPECONF_BPC_MASK) {
                case PIPECONF_6BPC:
                        pipe_config->pipe_bpp = 18;
                        break;
                case PIPECONF_8BPC:
                        pipe_config->pipe_bpp = 24;
                        break;
                case PIPECONF_10BPC:
                        pipe_config->pipe_bpp = 30;
                        break;
                default:
                        break;
                }
        }

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            (tmp & PIPECONF_COLOR_RANGE_SELECT))
                pipe_config->limited_color_range = true;

        pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
                PIPECONF_GAMMA_MODE_SHIFT;

        if (IS_CHERRYVIEW(dev_priv))
                pipe_config->cgm_mode = intel_de_read(dev_priv,
                                                      CGM_PIPE_MODE(crtc->pipe));

        i9xx_get_pipe_color_config(pipe_config);
        intel_color_get_config(pipe_config);

        if (INTEL_GEN(dev_priv) < 4)
                pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

        intel_get_pipe_timings(crtc, pipe_config);
        intel_get_pipe_src_size(crtc, pipe_config);

        i9xx_get_pfit_config(crtc, pipe_config);

        /* Pixel multiplier readout is platform-generation specific. */
        if (INTEL_GEN(dev_priv) >= 4) {
                /* No way to read it out on pipes B and C */
                if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
                        tmp = dev_priv->chv_dpll_md[crtc->pipe];
                else
                        tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
                         >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
                pipe_config->dpll_hw_state.dpll_md = tmp;
        } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
                   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
                tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & SDVO_MULTIPLIER_MASK)
                         >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
        } else {
                /* Note that on i915G/GM the pixel multiplier is in the sdvo
                 * port and will be fixed up in the encoder->get_config
                 * function. */
                pipe_config->pixel_multiplier = 1;
        }
        pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
                                                        DPLL(crtc->pipe));
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
                pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
                                                               FP0(crtc->pipe));
                pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
                                                               FP1(crtc->pipe));
        } else {
                /* Mask out read-only status bits. */
                pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
                                                     DPLL_PORTC_READY_MASK |
                                                     DPLL_PORTB_READY_MASK);
        }

        if (IS_CHERRYVIEW(dev_priv))
                chv_crtc_clock_get(crtc, pipe_config);
        else if (IS_VALLEYVIEW(dev_priv))
                vlv_crtc_clock_get(crtc, pipe_config);
        else
                i9xx_crtc_clock_get(crtc, pipe_config);

        /*
         * Normally the dotclock is filled in by the encoder .get_config()
         * but in case the pipe is enabled w/o any ports we need a sane
         * default.
         */
        pipe_config->hw.adjusted_mode.crtc_clock =
                pipe_config->port_clock / pipe_config->pixel_multiplier;

        ret = true;

out:
        intel_display_power_put(dev_priv, power_domain, wakeref);

        return ret;
}
9481
/*
 * Configure the PCH display reference clock (PCH_DREF_CONTROL) based
 * on which encoders are present (LVDS panel, CPU eDP) and whether SSC
 * should be used.  The desired final register value is computed first;
 * if it already matches the hardware, nothing is touched.  Otherwise
 * the clock sources are reconfigured one step at a time, with the
 * required settle delays between writes.
 */
static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        int i;
        u32 val, final;
        bool has_lvds = false;
        bool has_cpu_edp = false;
        bool has_panel = false;
        bool has_ck505 = false;
        bool can_ssc = false;
        bool using_ssc_source = false;

        /* We need to take the global config into account */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                switch (encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        has_panel = true;
                        has_lvds = true;
                        break;
                case INTEL_OUTPUT_EDP:
                        has_panel = true;
                        if (encoder->port == PORT_A)
                                has_cpu_edp = true;
                        break;
                default:
                        break;
                }
        }

        /* IBX: external CK505 clock chip availability comes from the VBT. */
        if (HAS_PCH_IBX(dev_priv)) {
                has_ck505 = dev_priv->vbt.display_clock_mode;
                can_ssc = has_ck505;
        } else {
                has_ck505 = false;
                can_ssc = true;
        }

        /* Check if any DPLLs are using the SSC source */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));

                if (!(temp & DPLL_VCO_ENABLE))
                        continue;

                if ((temp & PLL_REF_INPUT_MASK) ==
                    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
                        using_ssc_source = true;
                        break;
                }
        }

        drm_dbg_kms(&dev_priv->drm,
                    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
                    has_panel, has_lvds, has_ck505, using_ssc_source);

        /* Ironlake: try to setup display ref clock before DPLL
         * enabling. This is only under driver's control after
         * PCH B stepping, previous chipset stepping should be
         * ignoring this setting.
         */
        val = intel_de_read(dev_priv, PCH_DREF_CONTROL);

        /* As we must carefully and slowly disable/enable each source in turn,
         * compute the final state we want first and check if we need to
         * make any changes at all.
         */
        final = val;
        final &= ~DREF_NONSPREAD_SOURCE_MASK;
        if (has_ck505)
                final |= DREF_NONSPREAD_CK505_ENABLE;
        else
                final |= DREF_NONSPREAD_SOURCE_ENABLE;

        final &= ~DREF_SSC_SOURCE_MASK;
        final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
        final &= ~DREF_SSC1_ENABLE;

        if (has_panel) {
                final |= DREF_SSC_SOURCE_ENABLE;

                if (intel_panel_use_ssc(dev_priv) && can_ssc)
                        final |= DREF_SSC1_ENABLE;

                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc)
                                final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        else
                                final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
        } else if (using_ssc_source) {
                final |= DREF_SSC_SOURCE_ENABLE;
                final |= DREF_SSC1_ENABLE;
        }

        /* Hardware already matches the computed state — nothing to do. */
        if (final == val)
                return;

        /* Always enable nonspread source */
        val &= ~DREF_NONSPREAD_SOURCE_MASK;

        if (has_ck505)
                val |= DREF_NONSPREAD_CK505_ENABLE;
        else
                val |= DREF_NONSPREAD_SOURCE_ENABLE;

        if (has_panel) {
                val &= ~DREF_SSC_SOURCE_MASK;
                val |= DREF_SSC_SOURCE_ENABLE;

                /* SSC must be turned on before enabling the CPU output  */
                if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                        drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
                        val |= DREF_SSC1_ENABLE;
                } else
                        val &= ~DREF_SSC1_ENABLE;

                /* Get SSC going before enabling the outputs */
                intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
                intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
                udelay(200);

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Enable CPU source on CPU attached eDP */
                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                                drm_dbg_kms(&dev_priv->drm,
                                            "Using SSC on eDP\n");
                                val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        } else
                                val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
                intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
                udelay(200);
        } else {
                drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Turn off CPU output */
                val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
                intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
                udelay(200);

                /* SSC source is kept alive while any DPLL still uses it. */
                if (!using_ssc_source) {
                        drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");

                        /* Turn off the SSC source */
                        val &= ~DREF_SSC_SOURCE_MASK;
                        val |= DREF_SSC_SOURCE_DISABLE;

                        /* Turn off SSC1 */
                        val &= ~DREF_SSC1_ENABLE;

                        intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
                        intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
                        udelay(200);
                }
        }

        /* The stepwise writes must have converged on the computed state. */
        BUG_ON(val != final);
}
9650
/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2: assert
 * FDI_MPHY_IOSFSB_RESET_CTL and wait (100 us) for the status bit to
 * set, then de-assert and wait for it to clear.  Timeouts are logged
 * but not fatal.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
        u32 tmp;

        tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
        tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
        intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

        if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
                        FDI_MPHY_IOSFSB_RESET_STATUS, 100))
                drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");

        tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
        tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
        intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

        if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
                         FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
                drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}
9671
/* WaMPhyProgramming:hsw */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
        u32 tmp;

        /*
         * Read-modify-write a fixed series of FDI mPHY registers over
         * sideband (SBI_MPHY).  The offsets and values come straight
         * from the WaMPhyProgramming:hsw workaround; the registers
         * appear in 0x20xx/0x21xx pairs (presumably one per mPHY
         * lane/channel — confirm against BSpec).  Do not change the
         * values or the ordering.
         */
        tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
        tmp &= ~(0xFF << 24);
        tmp |= (0x12 << 24);
        intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
        tmp &= ~(7 << 13);
        tmp |= (5 << 13);
        intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
        tmp &= ~(7 << 13);
        tmp |= (5 << 13);
        intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
        tmp &= ~0xFF;
        tmp |= 0x1C;
        intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
        tmp &= ~0xFF;
        tmp |= 0x1C;
        intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
        tmp &= ~(0xFF << 16);
        tmp |= (0x1C << 16);
        intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
        tmp &= ~(0xFF << 16);
        tmp |= (0x1C << 16);
        intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
        tmp |= (1 << 27);
        intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
        tmp |= (1 << 27);
        intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
        tmp &= ~(0xF << 28);
        tmp |= (4 << 28);
        intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
        tmp &= ~(0xF << 28);
        tmp |= (4 << 28);
        intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
9746
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
                                 bool with_spread, bool with_fdi)
{
        u32 reg, tmp;

        /* Sanitize the flag combination: FDI implies spread, LP PCH has no FDI. */
        if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
                with_spread = true;
        if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
            with_fdi, "LP PCH doesn't have FDI\n"))
                with_fdi = false;

        mutex_lock(&dev_priv->sb_lock);

        /* Un-gate SSC control: clear DISABLE while keeping PATHALT set. */
        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
        tmp &= ~SBI_SSCCTL_DISABLE;
        tmp |= SBI_SSCCTL_PATHALT;
        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

        udelay(24);

        if (with_spread) {
                /* Clear PATHALT to actually apply the spread. */
                tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
                tmp &= ~SBI_SSCCTL_PATHALT;
                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

                if (with_fdi) {
                        lpt_reset_fdi_mphy(dev_priv);
                        lpt_program_fdi_mphy(dev_priv);
                }
        }

        /* The buffer-enable config lives in a different SBI reg on LP PCH. */
        reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

        mutex_unlock(&dev_priv->sb_lock);
}
9791
/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
        u32 reg, tmp;

        mutex_lock(&dev_priv->sb_lock);

        /* The buffer-enable config lives in a different SBI reg on LP PCH. */
        reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
        if (!(tmp & SBI_SSCCTL_DISABLE)) {
                /* Set PATHALT (with a settle delay) before disabling SSC. */
                if (!(tmp & SBI_SSCCTL_PATHALT)) {
                        tmp |= SBI_SSCCTL_PATHALT;
                        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
                        udelay(32);
                }
                tmp |= SBI_SSCCTL_DISABLE;
                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
        }

        mutex_unlock(&dev_priv->sb_lock);
}
9817
/* Map a bend amount in steps (-50..50, multiples of 5) to a table index. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SSCDIVINTPHASE register values for each supported CLKOUT_DP bend
 * amount, indexed via BEND_IDX().  Consumed by lpt_bend_clkout_dp().
 */
static const u16 sscdivintphase[] = {
        [BEND_IDX( 50)] = 0x3B23,
        [BEND_IDX( 45)] = 0x3B23,
        [BEND_IDX( 40)] = 0x3C23,
        [BEND_IDX( 35)] = 0x3C23,
        [BEND_IDX( 30)] = 0x3D23,
        [BEND_IDX( 25)] = 0x3D23,
        [BEND_IDX( 20)] = 0x3E23,
        [BEND_IDX( 15)] = 0x3E23,
        [BEND_IDX( 10)] = 0x3F23,
        [BEND_IDX(  5)] = 0x3F23,
        [BEND_IDX(  0)] = 0x0025,
        [BEND_IDX( -5)] = 0x0025,
        [BEND_IDX(-10)] = 0x0125,
        [BEND_IDX(-15)] = 0x0125,
        [BEND_IDX(-20)] = 0x0225,
        [BEND_IDX(-25)] = 0x0225,
        [BEND_IDX(-30)] = 0x0325,
        [BEND_IDX(-35)] = 0x0325,
        [BEND_IDX(-40)] = 0x0425,
        [BEND_IDX(-45)] = 0x0425,
        [BEND_IDX(-50)] = 0x0525,
};
9843
9844 /*
9845  * Bend CLKOUT_DP
9846  * steps -50 to 50 inclusive, in steps of 5
9847  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
9848  * change in clock period = -(steps / 10) * 5.787 ps
9849  */
9850 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
9851 {
9852         u32 tmp;
9853         int idx = BEND_IDX(steps);
9854
9855         if (WARN_ON(steps % 5 != 0))
9856                 return;
9857
9858         if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
9859                 return;
9860
9861         mutex_lock(&dev_priv->sb_lock);
9862
9863         if (steps % 10 != 0)
9864                 tmp = 0xAAAAAAAB;
9865         else
9866                 tmp = 0x00000000;
9867         intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
9868
9869         tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
9870         tmp &= 0xffff0000;
9871         tmp |= sscdivintphase[idx];
9872         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
9873
9874         mutex_unlock(&dev_priv->sb_lock);
9875 }
9876
9877 #undef BEND_IDX
9878
9879 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
9880 {
9881         u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
9882         u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
9883
9884         if ((ctl & SPLL_PLL_ENABLE) == 0)
9885                 return false;
9886
9887         if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
9888             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9889                 return true;
9890
9891         if (IS_BROADWELL(dev_priv) &&
9892             (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
9893                 return true;
9894
9895         return false;
9896 }
9897
9898 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
9899                                enum intel_dpll_id id)
9900 {
9901         u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
9902         u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
9903
9904         if ((ctl & WRPLL_PLL_ENABLE) == 0)
9905                 return false;
9906
9907         if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
9908                 return true;
9909
9910         if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
9911             (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
9912             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9913                 return true;
9914
9915         return false;
9916 }
9917
9918 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
9919 {
9920         struct intel_encoder *encoder;
9921         bool has_fdi = false;
9922
9923         for_each_intel_encoder(&dev_priv->drm, encoder) {
9924                 switch (encoder->type) {
9925                 case INTEL_OUTPUT_ANALOG:
9926                         has_fdi = true;
9927                         break;
9928                 default:
9929                         break;
9930                 }
9931         }
9932
9933         /*
9934          * The BIOS may have decided to use the PCH SSC
9935          * reference so we must not disable it until the
9936          * relevant PLLs have stopped relying on it. We'll
9937          * just leave the PCH SSC reference enabled in case
9938          * any active PLL is using it. It will get disabled
9939          * after runtime suspend if we don't have FDI.
9940          *
9941          * TODO: Move the whole reference clock handling
9942          * to the modeset sequence proper so that we can
9943          * actually enable/disable/reconfigure these things
9944          * safely. To do that we need to introduce a real
9945          * clock hierarchy. That would also allow us to do
9946          * clock bending finally.
9947          */
9948         dev_priv->pch_ssc_use = 0;
9949
9950         if (spll_uses_pch_ssc(dev_priv)) {
9951                 drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
9952                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
9953         }
9954
9955         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
9956                 drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
9957                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
9958         }
9959
9960         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
9961                 drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
9962                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
9963         }
9964
9965         if (dev_priv->pch_ssc_use)
9966                 return;
9967
9968         if (has_fdi) {
9969                 lpt_bend_clkout_dp(dev_priv, 0);
9970                 lpt_enable_clkout_dp(dev_priv, true, true);
9971         } else {
9972                 lpt_disable_clkout_dp(dev_priv);
9973         }
9974 }
9975
9976 /*
9977  * Initialize reference clocks when the driver loads
9978  */
9979 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
9980 {
9981         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
9982                 ilk_init_pch_refclk(dev_priv);
9983         else if (HAS_PCH_LPT(dev_priv))
9984                 lpt_init_pch_refclk(dev_priv);
9985 }
9986
/* Program PIPECONF for an ILK-style (PCH) platform from the crtc state. */
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	WARN_ON(crtc_state->limited_color_range &&
		crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	if (crtc_state->limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	/* Any non-RGB output format is programmed as YUV BT.709 here. */
	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;

	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	val |= PIPECONF_FRAME_START_DELAY(0);

	intel_de_write(dev_priv, PIPECONF(pipe), val);
	/* Posting read to flush the write before continuing. */
	intel_de_posting_read(dev_priv, PIPECONF(pipe));
}
10042
10043 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
10044 {
10045         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
10046         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10047         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
10048         u32 val = 0;
10049
10050         if (IS_HASWELL(dev_priv) && crtc_state->dither)
10051                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
10052
10053         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
10054                 val |= PIPECONF_INTERLACED_ILK;
10055         else
10056                 val |= PIPECONF_PROGRESSIVE;
10057
10058         if (IS_HASWELL(dev_priv) &&
10059             crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
10060                 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
10061
10062         intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
10063         intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
10064 }
10065
/* Program PIPEMISC (BDW+) from the crtc state: dithering, colorspace, HDR. */
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_DITHER_6_BPC;
		break;
	case 24:
		val |= PIPEMISC_DITHER_8_BPC;
		break;
	case 30:
		val |= PIPEMISC_DITHER_10_BPC;
		break;
	case 36:
		val |= PIPEMISC_DITHER_12_BPC;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	/*
	 * Gen11+: use higher precision only when every active plane
	 * (other than the cursor) is an HDR-capable plane.
	 */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
					   BIT(PLANE_CURSOR))) == 0)
		val |= PIPEMISC_HDR_MODE_PRECISION;

	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
10108
/*
 * Read back the pipe bpp from the PIPEMISC dither field (BDW+).
 * Returns 18/24/30/36, or 0 for an unexpected register value.
 */
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));

	switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
	case PIPEMISC_DITHER_6_BPC:
		return 18;
	case PIPEMISC_DITHER_8_BPC:
		return 24;
	case PIPEMISC_DITHER_10_BPC:
		return 30;
	case PIPEMISC_DITHER_12_BPC:
		return 36;
	default:
		MISSING_CASE(tmp);
		return 0;
	}
}
10130
10131 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
10132 {
10133         /*
10134          * Account for spread spectrum to avoid
10135          * oversubscribing the link. Max center spread
10136          * is 2.5%; use 5% for safety's sake.
10137          */
10138         u32 bps = target_clock * bpp * 21 / 20;
10139         return DIV_ROUND_UP(bps, link_bw * 8);
10140 }
10141
/* True when the effective M is below factor*N, i.e. FP_CB_TUNE is needed. */
static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
10146
/*
 * Compute the ILK-style DPLL/FP register values for the given state
 * (and optional reduced clock) and store them in
 * crtc_state->dpll_hw_state (dpll, fp0, fp1).
 */
static void ilk_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *crtc_state,
			     struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) &&
		     intel_is_dual_link_lvds(dev_priv)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock) {
		factor = 20;
	}

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	/* FP1 is the reduced (downclocked) divider, or a copy of FP0. */
	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* SSC reference only for LVDS panels that want it. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
10248
/*
 * Compute clock/PLL state for an ILK-style crtc and reserve a shared
 * DPLL for it. Returns 0 on success, -EINVAL if no suitable PLL
 * settings or shared DPLL could be found.
 */
static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);
	const struct intel_limit *limit;
	int refclk = 120000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	/* Pick the divider limits matching the refclk and LVDS link config. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		if (intel_is_dual_link_lvds(dev_priv)) {
			if (refclk == 100000)
				limit = &ilk_limits_dual_lvds_100m;
			else
				limit = &ilk_limits_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &ilk_limits_single_lvds_100m;
			else
				limit = &ilk_limits_single_lvds;
		}
	} else {
		limit = &ilk_limits_dac;
	}

	/* Skip the search if userspace provided the dividers already. */
	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		drm_err(&dev_priv->drm,
			"Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ilk_compute_dpll(crtc, crtc_state, NULL);

	if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to find PLL for pipe %c\n",
			    pipe_name(crtc->pipe));
		return -EINVAL;
	}

	return 0;
}
10307
10308 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
10309                                          struct intel_link_m_n *m_n)
10310 {
10311         struct drm_device *dev = crtc->base.dev;
10312         struct drm_i915_private *dev_priv = to_i915(dev);
10313         enum pipe pipe = crtc->pipe;
10314
10315         m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
10316         m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
10317         m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
10318                 & ~TU_SIZE_MASK;
10319         m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
10320         m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
10321                     & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
10322 }
10323
10324 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
10325                                          enum transcoder transcoder,
10326                                          struct intel_link_m_n *m_n,
10327                                          struct intel_link_m_n *m2_n2)
10328 {
10329         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10330         enum pipe pipe = crtc->pipe;
10331
10332         if (INTEL_GEN(dev_priv) >= 5) {
10333                 m_n->link_m = intel_de_read(dev_priv,
10334                                             PIPE_LINK_M1(transcoder));
10335                 m_n->link_n = intel_de_read(dev_priv,
10336                                             PIPE_LINK_N1(transcoder));
10337                 m_n->gmch_m = intel_de_read(dev_priv,
10338                                             PIPE_DATA_M1(transcoder))
10339                         & ~TU_SIZE_MASK;
10340                 m_n->gmch_n = intel_de_read(dev_priv,
10341                                             PIPE_DATA_N1(transcoder));
10342                 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
10343                             & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
10344
10345                 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
10346                         m2_n2->link_m = intel_de_read(dev_priv,
10347                                                       PIPE_LINK_M2(transcoder));
10348                         m2_n2->link_n = intel_de_read(dev_priv,
10349                                                              PIPE_LINK_N2(transcoder));
10350                         m2_n2->gmch_m = intel_de_read(dev_priv,
10351                                                              PIPE_DATA_M2(transcoder))
10352                                         & ~TU_SIZE_MASK;
10353                         m2_n2->gmch_n = intel_de_read(dev_priv,
10354                                                              PIPE_DATA_N2(transcoder));
10355                         m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
10356                                         & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
10357                 }
10358         } else {
10359                 m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
10360                 m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
10361                 m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
10362                         & ~TU_SIZE_MASK;
10363                 m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
10364                 m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
10365                             & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
10366         }
10367 }
10368
/*
 * Read back the DP link M/N values, from the PCH transcoder when one
 * is in use, otherwise from the CPU transcoder (including M2/N2).
 */
void intel_dp_get_m_n(struct intel_crtc *crtc,
		      struct intel_crtc_state *pipe_config)
{
	if (pipe_config->has_pch_encoder)
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
	else
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
					     &pipe_config->dp_m_n,
					     &pipe_config->dp_m2_n2);
}
10379
/* Read back the FDI link M/N values from the CPU transcoder. */
static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
10386
10387 static void skl_get_pfit_config(struct intel_crtc *crtc,
10388                                 struct intel_crtc_state *pipe_config)
10389 {
10390         struct drm_device *dev = crtc->base.dev;
10391         struct drm_i915_private *dev_priv = to_i915(dev);
10392         struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
10393         u32 ps_ctrl = 0;
10394         int id = -1;
10395         int i;
10396
10397         /* find scaler attached to this pipe */
10398         for (i = 0; i < crtc->num_scalers; i++) {
10399                 ps_ctrl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
10400                 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
10401                         id = i;
10402                         pipe_config->pch_pfit.enabled = true;
10403                         pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
10404                                                                   SKL_PS_WIN_POS(crtc->pipe, i));
10405                         pipe_config->pch_pfit.size = intel_de_read(dev_priv,
10406                                                                    SKL_PS_WIN_SZ(crtc->pipe, i));
10407                         scaler_state->scalers[i].in_use = true;
10408                         break;
10409                 }
10410         }
10411
10412         scaler_state->scaler_id = id;
10413         if (id >= 0) {
10414                 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
10415         } else {
10416                 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
10417         }
10418 }
10419
/*
 * Read out the current hardware state of the primary plane on SKL+
 * and fill in *plane_config so the BIOS-inherited framebuffer can be
 * reconstructed. On failure the partially built fb is freed and
 * plane_config->fb is left NULL.
 */
static void
skl_get_initial_plane_config(struct intel_crtc *crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to read out if the plane is disabled. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id));

	/* The format field layout changed on gen11. */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* Alpha mode moved from PLANE_CTL to PLANE_COLOR_CTL on GLK/gen10+. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = intel_de_read(dev_priv,
				      PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/* Translate the hw tiling/compression bits to a fb modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		plane_config->tiling = I915_TILING_Y;
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
				I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
				I915_FORMAT_MOD_Y_TILED_CCS;
		else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, that's why this swapping.
	 */
	switch (val & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_0:
		plane_config->rotation = DRM_MODE_ROTATE_0;
		break;
	case PLANE_CTL_ROTATE_90:
		plane_config->rotation = DRM_MODE_ROTATE_270;
		break;
	case PLANE_CTL_ROTATE_180:
		plane_config->rotation = DRM_MODE_ROTATE_180;
		break;
	case PLANE_CTL_ROTATE_270:
		plane_config->rotation = DRM_MODE_ROTATE_90;
		break;
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    val & PLANE_CTL_FLIP_HORIZONTAL)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	/* Surface base address is 4K aligned; mask off the low bits. */
	base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id));

	/* PLANE_SIZE stores width/height minus one. */
	val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xffff) + 1;
	fb->width = ((val >> 0) & 0xffff) + 1;

	/* Stride register is in units that depend on format/modifier. */
	val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id));
	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	drm_dbg_kms(&dev_priv->drm,
		    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		    crtc->base.name, plane->base.name, fb->width, fb->height,
		    fb->format->cpp[0] * 8, base, fb->pitches[0],
		    plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
10552
/* Read back the ILK-style panel fitter state for this pipe. */
static void ilk_get_pfit_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
							  PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = intel_de_read(dev_priv,
							   PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignments of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiates them) so just WARN about this case for now. */
		if (IS_GEN(dev_priv, 7)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}
10578
10579 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
10580                                 struct intel_crtc_state *pipe_config)
10581 {
10582         struct drm_device *dev = crtc->base.dev;
10583         struct drm_i915_private *dev_priv = to_i915(dev);
10584         enum intel_display_power_domain power_domain;
10585         intel_wakeref_t wakeref;
10586         u32 tmp;
10587         bool ret;
10588
10589         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10590         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10591         if (!wakeref)
10592                 return false;
10593
10594         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
10595         pipe_config->shared_dpll = NULL;
10596         pipe_config->master_transcoder = INVALID_TRANSCODER;
10597
10598         ret = false;
10599         tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
10600         if (!(tmp & PIPECONF_ENABLE))
10601                 goto out;
10602
10603         switch (tmp & PIPECONF_BPC_MASK) {
10604         case PIPECONF_6BPC:
10605                 pipe_config->pipe_bpp = 18;
10606                 break;
10607         case PIPECONF_8BPC:
10608                 pipe_config->pipe_bpp = 24;
10609                 break;
10610         case PIPECONF_10BPC:
10611                 pipe_config->pipe_bpp = 30;
10612                 break;
10613         case PIPECONF_12BPC:
10614                 pipe_config->pipe_bpp = 36;
10615                 break;
10616         default:
10617                 break;
10618         }
10619
10620         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
10621                 pipe_config->limited_color_range = true;
10622
10623         switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
10624         case PIPECONF_OUTPUT_COLORSPACE_YUV601:
10625         case PIPECONF_OUTPUT_COLORSPACE_YUV709:
10626                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
10627                 break;
10628         default:
10629                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
10630                 break;
10631         }
10632
10633         pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
10634                 PIPECONF_GAMMA_MODE_SHIFT;
10635
10636         pipe_config->csc_mode = intel_de_read(dev_priv,
10637                                               PIPE_CSC_MODE(crtc->pipe));
10638
10639         i9xx_get_pipe_color_config(pipe_config);
10640         intel_color_get_config(pipe_config);
10641
10642         if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
10643                 struct intel_shared_dpll *pll;
10644                 enum intel_dpll_id pll_id;
10645
10646                 pipe_config->has_pch_encoder = true;
10647
10648                 tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
10649                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10650                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
10651
10652                 ilk_get_fdi_m_n_config(crtc, pipe_config);
10653
10654                 if (HAS_PCH_IBX(dev_priv)) {
10655                         /*
10656                          * The pipe->pch transcoder and pch transcoder->pll
10657                          * mapping is fixed.
10658                          */
10659                         pll_id = (enum intel_dpll_id) crtc->pipe;
10660                 } else {
10661                         tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
10662                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
10663                                 pll_id = DPLL_ID_PCH_PLL_B;
10664                         else
10665                                 pll_id= DPLL_ID_PCH_PLL_A;
10666                 }
10667
10668                 pipe_config->shared_dpll =
10669                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
10670                 pll = pipe_config->shared_dpll;
10671
10672                 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
10673                                                 &pipe_config->dpll_hw_state));
10674
10675                 tmp = pipe_config->dpll_hw_state.dpll;
10676                 pipe_config->pixel_multiplier =
10677                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
10678                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
10679
10680                 ilk_pch_clock_get(crtc, pipe_config);
10681         } else {
10682                 pipe_config->pixel_multiplier = 1;
10683         }
10684
10685         intel_get_pipe_timings(crtc, pipe_config);
10686         intel_get_pipe_src_size(crtc, pipe_config);
10687
10688         ilk_get_pfit_config(crtc, pipe_config);
10689
10690         ret = true;
10691
10692 out:
10693         intel_display_power_put(dev_priv, power_domain, wakeref);
10694
10695         return ret;
10696 }
10697
10698 static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
10699                                   struct intel_crtc_state *crtc_state)
10700 {
10701         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10702         struct intel_atomic_state *state =
10703                 to_intel_atomic_state(crtc_state->uapi.state);
10704
10705         if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
10706             INTEL_GEN(dev_priv) >= 11) {
10707                 struct intel_encoder *encoder =
10708                         intel_get_crtc_new_encoder(state, crtc_state);
10709
10710                 if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
10711                         drm_dbg_kms(&dev_priv->drm,
10712                                     "failed to find PLL for pipe %c\n",
10713                                     pipe_name(crtc->pipe));
10714                         return -EINVAL;
10715                 }
10716         }
10717
10718         return 0;
10719 }
10720
/*
 * CNL: read back which shared DPLL currently feeds @port and record it
 * in @pipe_config->shared_dpll.
 */
static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
                            struct intel_crtc_state *pipe_config)
{
        enum intel_dpll_id id;
        u32 temp;

        /* DPCLKA_CFGCR0 carries a per-port clock select field. */
        temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
        id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);

        /* CNL only has DPLL0..DPLL2 selectable here. */
        if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
                return;

        pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10735
/*
 * ICL+: read back which PLL drives @port, distinguishing combo PHY
 * ports (PLL select in ICL_DPCLKA_CFGCR0) from Type-C ports (MG PHY
 * PLL vs. TBT PLL, chosen via DDI_CLK_SEL), and record the result in
 * @pipe_config's per-port DPLL table.
 */
static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
                            struct intel_crtc_state *pipe_config)
{
        enum phy phy = intel_port_to_phy(dev_priv, port);
        enum icl_port_dpll_id port_dpll_id;
        enum intel_dpll_id id;
        u32 temp;

        if (intel_phy_is_combo(dev_priv, phy)) {
                temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) &
                        ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
                id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
                port_dpll_id = ICL_PORT_DPLL_DEFAULT;
        } else if (intel_phy_is_tc(dev_priv, phy)) {
                u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;

                if (clk_sel == DDI_CLK_SEL_MG) {
                        /* Type-C port in DP-alt/native mode: MG PHY PLL. */
                        id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
                                                                    port));
                        port_dpll_id = ICL_PORT_DPLL_MG_PHY;
                } else {
                        /* Otherwise it must be one of the TBT clock selects. */
                        WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
                        id = DPLL_ID_ICL_TBTPLL;
                        port_dpll_id = ICL_PORT_DPLL_DEFAULT;
                }
        } else {
                WARN(1, "Invalid port %x\n", port);
                return;
        }

        pipe_config->icl_port_dplls[port_dpll_id].pll =
                intel_get_shared_dpll_by_id(dev_priv, id);

        /* Also make the looked-up PLL the active one for this state. */
        icl_set_active_port_dpll(pipe_config, port_dpll_id);
}
10771
/*
 * BXT/GLK: the port->PLL mapping is fixed, so simply translate @port to
 * its dedicated DPLL and record it in @pipe_config->shared_dpll.
 */
static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
                                enum port port,
                                struct intel_crtc_state *pipe_config)
{
        enum intel_dpll_id id;

        switch (port) {
        case PORT_A:
                id = DPLL_ID_SKL_DPLL0;
                break;
        case PORT_B:
                id = DPLL_ID_SKL_DPLL1;
                break;
        case PORT_C:
                id = DPLL_ID_SKL_DPLL2;
                break;
        default:
                drm_err(&dev_priv->drm, "Incorrect port type\n");
                return;
        }

        pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10795
/*
 * SKL/KBL: read back which shared DPLL feeds @port from DPLL_CTRL2 and
 * record it in @pipe_config->shared_dpll.
 */
static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
                            struct intel_crtc_state *pipe_config)
{
        enum intel_dpll_id id;
        u32 temp;

        temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
        /*
         * NOTE(review): open-coded shift matching the CLK_SEL field
         * layout (3 bits per port starting at bit 1) — presumably the
         * same as the mask macro above; confirm against i915_reg.h.
         */
        id = temp >> (port * 3 + 1);

        if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
                return;

        pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10810
/*
 * HSW/BDW: translate the PORT_CLK_SEL register value for @port into a
 * shared DPLL id and record it in @pipe_config->shared_dpll. Nothing is
 * recorded if the port has no clock selected (or an unknown value is
 * read back).
 */
static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
                            struct intel_crtc_state *pipe_config)
{
        enum intel_dpll_id id;
        u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port));

        switch (ddi_pll_sel) {
        case PORT_CLK_SEL_WRPLL1:
                id = DPLL_ID_WRPLL1;
                break;
        case PORT_CLK_SEL_WRPLL2:
                id = DPLL_ID_WRPLL2;
                break;
        case PORT_CLK_SEL_SPLL:
                id = DPLL_ID_SPLL;
                break;
        case PORT_CLK_SEL_LCPLL_810:
                id = DPLL_ID_LCPLL_810;
                break;
        case PORT_CLK_SEL_LCPLL_1350:
                id = DPLL_ID_LCPLL_1350;
                break;
        case PORT_CLK_SEL_LCPLL_2700:
                id = DPLL_ID_LCPLL_2700;
                break;
        default:
                MISSING_CASE(ddi_pll_sel);
                /* fall through */
        case PORT_CLK_SEL_NONE:
                return;
        }

        pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10845
/*
 * Determine which CPU transcoder (including eDP/DSI panel transcoders)
 * drives @crtc, and whether the pipe is enabled.
 *
 * On success the transcoder's power domain reference is stashed into
 * @wakerefs and its bit set in @power_domain_mask; the caller is
 * responsible for dropping all references in the mask.
 *
 * Returns true if the selected transcoder's pipe is enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config,
                                     u64 *power_domain_mask,
                                     intel_wakeref_t *wakerefs)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        unsigned long panel_transcoder_mask = 0;
        unsigned long enabled_panel_transcoders = 0;
        enum transcoder panel_transcoder;
        intel_wakeref_t wf;
        u32 tmp;

        if (INTEL_GEN(dev_priv) >= 11)
                panel_transcoder_mask |=
                        BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

        if (HAS_TRANSCODER_EDP(dev_priv))
                panel_transcoder_mask |= BIT(TRANSCODER_EDP);

        /*
         * The pipe->transcoder mapping is fixed with the exception of the eDP
         * and DSI transcoders handled below.
         */
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

        /*
         * XXX: Do intel_display_power_get_if_enabled before reading this (for
         * consistency and less surprising code; it's in always on power).
         */
        for_each_set_bit(panel_transcoder,
                         &panel_transcoder_mask,
                         ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
                bool force_thru = false;
                enum pipe trans_pipe;

                tmp = intel_de_read(dev_priv,
                                    TRANS_DDI_FUNC_CTL(panel_transcoder));
                if (!(tmp & TRANS_DDI_FUNC_ENABLE))
                        continue;

                /*
                 * Log all enabled ones, only use the first one.
                 *
                 * FIXME: This won't work for two separate DSI displays.
                 */
                enabled_panel_transcoders |= BIT(panel_transcoder);
                if (enabled_panel_transcoders != BIT(panel_transcoder))
                        continue;

                /* Decode which pipe this panel transcoder is attached to. */
                switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
                default:
                        WARN(1, "unknown pipe linked to transcoder %s\n",
                             transcoder_name(panel_transcoder));
                        /* fall through */
                case TRANS_DDI_EDP_INPUT_A_ONOFF:
                        force_thru = true;
                        /* fall through */
                case TRANS_DDI_EDP_INPUT_A_ON:
                        trans_pipe = PIPE_A;
                        break;
                case TRANS_DDI_EDP_INPUT_B_ONOFF:
                        trans_pipe = PIPE_B;
                        break;
                case TRANS_DDI_EDP_INPUT_C_ONOFF:
                        trans_pipe = PIPE_C;
                        break;
                case TRANS_DDI_EDP_INPUT_D_ONOFF:
                        trans_pipe = PIPE_D;
                        break;
                }

                if (trans_pipe == crtc->pipe) {
                        pipe_config->cpu_transcoder = panel_transcoder;
                        pipe_config->pch_pfit.force_thru = force_thru;
                }
        }

        /*
         * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
         */
        WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
                enabled_panel_transcoders != BIT(TRANSCODER_EDP));

        power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
        WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

        wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wf)
                return false;

        /* Reference is dropped by the caller via power_domain_mask. */
        wakerefs[power_domain] = wf;
        *power_domain_mask |= BIT_ULL(power_domain);

        tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));

        return tmp & PIPECONF_ENABLE;
}
10945
/*
 * BXT: probe the two DSI transcoders (ports A and C) and record in
 * @pipe_config->cpu_transcoder the one, if any, that drives @crtc.
 *
 * Power domain references taken along the way are stashed into
 * @wakerefs / @power_domain_mask for the caller to release.
 *
 * Returns true if @crtc is driven by a DSI transcoder.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
                                         struct intel_crtc_state *pipe_config,
                                         u64 *power_domain_mask,
                                         intel_wakeref_t *wakerefs)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        enum transcoder cpu_transcoder;
        intel_wakeref_t wf;
        enum port port;
        u32 tmp;

        for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
                if (port == PORT_A)
                        cpu_transcoder = TRANSCODER_DSI_A;
                else
                        cpu_transcoder = TRANSCODER_DSI_C;

                power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
                WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

                wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
                if (!wf)
                        continue;

                wakerefs[power_domain] = wf;
                *power_domain_mask |= BIT_ULL(power_domain);

                /*
                 * The PLL needs to be enabled with a valid divider
                 * configuration, otherwise accessing DSI registers will hang
                 * the machine. See BSpec North Display Engine
                 * registers/MIPI[BXT]. We can break out here early, since we
                 * need the same DSI PLL to be enabled for both DSI ports.
                 */
                if (!bxt_dsi_pll_is_enabled(dev_priv))
                        break;

                /* XXX: this works for video mode only */
                tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
                if (!(tmp & DPI_ENABLE))
                        continue;

                /* Skip DSI ports routed to some other pipe. */
                tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
                if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
                        continue;

                pipe_config->cpu_transcoder = cpu_transcoder;
                break;
        }

        return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
11000
/*
 * Read out the DDI port state for @crtc: determine which port drives
 * the CPU transcoder, look up and read back the PLL feeding it, and on
 * HSW/BDW detect an enabled PCH/FDI encoder behind DDI E.
 */
static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        struct intel_shared_dpll *pll;
        enum port port;
        u32 tmp;

        if (transcoder_is_dsi(cpu_transcoder)) {
                /* DSI transcoder->port mapping is fixed. */
                port = (cpu_transcoder == TRANSCODER_DSI_A) ?
                                                PORT_A : PORT_B;
        } else {
                tmp = intel_de_read(dev_priv,
                                    TRANS_DDI_FUNC_CTL(cpu_transcoder));
                /* TGL moved/widened the port select field. */
                if (INTEL_GEN(dev_priv) >= 12)
                        port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
                else
                        port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
        }

        if (INTEL_GEN(dev_priv) >= 11)
                icl_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_CANNONLAKE(dev_priv))
                cnl_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_GEN9_BC(dev_priv))
                skl_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_GEN9_LP(dev_priv))
                bxt_get_ddi_pll(dev_priv, port, pipe_config);
        else
                hsw_get_ddi_pll(dev_priv, port, pipe_config);

        pll = pipe_config->shared_dpll;
        if (pll) {
                WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
                                                &pipe_config->dpll_hw_state));
        }

        /*
         * Haswell has only one PCH/FDI transcoder (A), which is wired to
         * DDI E. So just check whether this pipe is wired to DDI E and
         * whether the PCH transcoder is on.
         */
        if (INTEL_GEN(dev_priv) < 9 &&
            (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
                pipe_config->has_pch_encoder = true;

                tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
                pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
                                          FDI_DP_PORT_WIDTH_SHIFT) + 1;

                ilk_get_fdi_m_n_config(crtc, pipe_config);
        }
}
11055
11056 static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv,
11057                                                  enum transcoder cpu_transcoder)
11058 {
11059         u32 trans_port_sync, master_select;
11060
11061         trans_port_sync = intel_de_read(dev_priv,
11062                                         TRANS_DDI_FUNC_CTL2(cpu_transcoder));
11063
11064         if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
11065                 return INVALID_TRANSCODER;
11066
11067         master_select = trans_port_sync &
11068                         PORT_SYNC_MODE_MASTER_SELECT_MASK;
11069         if (master_select == 0)
11070                 return TRANSCODER_EDP;
11071         else
11072                 return master_select - 1;
11073 }
11074
/*
 * ICL+: read out the transcoder port sync topology for @crtc_state:
 * record this transcoder's master (if any) and build the mask of
 * slave transcoders that name us as their master.
 */
static void icl_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
        u32 transcoders;
        enum transcoder cpu_transcoder;

        crtc_state->master_transcoder = transcoder_master_readout(dev_priv,
                                                                  crtc_state->cpu_transcoder);

        transcoders = BIT(TRANSCODER_A) |
                BIT(TRANSCODER_B) |
                BIT(TRANSCODER_C) |
                BIT(TRANSCODER_D);
        for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
                enum intel_display_power_domain power_domain;
                intel_wakeref_t trans_wakeref;

                /* Skip transcoders whose power well is off. */
                power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
                trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
                                                                   power_domain);

                if (!trans_wakeref)
                        continue;

                if (transcoder_master_readout(dev_priv, cpu_transcoder) ==
                    crtc_state->cpu_transcoder)
                        crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder);

                intel_display_power_put(dev_priv, power_domain, trans_wakeref);
        }

        /* A transcoder cannot be both a slave and a master. */
        WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER &&
                crtc_state->sync_mode_slaves_mask);
}
11109
/*
 * Read out the full hardware state of a HSW+ pipe into @pipe_config.
 *
 * Power domain references taken during the readout are tracked in
 * @wakerefs/@power_domain_mask and all dropped before returning.
 *
 * Returns true if the pipe is active and the state was read out.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
        enum intel_display_power_domain power_domain;
        u64 power_domain_mask;
        bool active;
        u32 tmp;

        pipe_config->master_transcoder = INVALID_TRANSCODER;

        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wf)
                return false;

        wakerefs[power_domain] = wf;
        power_domain_mask = BIT_ULL(power_domain);

        pipe_config->shared_dpll = NULL;

        active = hsw_get_transcoder_state(crtc, pipe_config,
                                          &power_domain_mask, wakerefs);

        /* BXT DSI bypasses the DDI transcoders; check it separately. */
        if (IS_GEN9_LP(dev_priv) &&
            bxt_get_dsi_transcoder_state(crtc, pipe_config,
                                         &power_domain_mask, wakerefs)) {
                WARN_ON(active);
                active = true;
        }

        if (!active)
                goto out;

        if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
            INTEL_GEN(dev_priv) >= 11) {
                hsw_get_ddi_port_state(crtc, pipe_config);
                intel_get_pipe_timings(crtc, pipe_config);
        }

        intel_get_pipe_src_size(crtc, pipe_config);

        if (IS_HASWELL(dev_priv)) {
                /* HSW: output colorspace lives in PIPECONF. */
                u32 tmp = intel_de_read(dev_priv,
                                        PIPECONF(pipe_config->cpu_transcoder));

                if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
                        pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
                else
                        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        } else {
                pipe_config->output_format =
                        bdw_get_pipemisc_output_format(crtc);

                /*
                 * Currently there is no interface defined to
                 * check user preference between RGB/YCBCR444
                 * or YCBCR420. So the only possible case for
                 * YCBCR444 usage is driving YCBCR420 output
                 * with LSPCON, when pipe is configured for
                 * YCBCR444 output and LSPCON takes care of
                 * downsampling it.
                 */
                pipe_config->lspcon_downsampling =
                        pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
        }

        pipe_config->gamma_mode = intel_de_read(dev_priv,
                                                GAMMA_MODE(crtc->pipe));

        pipe_config->csc_mode = intel_de_read(dev_priv,
                                              PIPE_CSC_MODE(crtc->pipe));

        if (INTEL_GEN(dev_priv) >= 9) {
                tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

                if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
                        pipe_config->gamma_enable = true;

                if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
                        pipe_config->csc_enable = true;
        } else {
                i9xx_get_pipe_color_config(pipe_config);
        }

        intel_color_get_config(pipe_config);

        tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
        pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
        if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
                pipe_config->ips_linetime =
                        REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

        /* The panel fitter has its own power domain; grab it if enabled. */
        power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
        WARN_ON(power_domain_mask & BIT_ULL(power_domain));

        wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (wf) {
                wakerefs[power_domain] = wf;
                power_domain_mask |= BIT_ULL(power_domain);

                if (INTEL_GEN(dev_priv) >= 9)
                        skl_get_pfit_config(crtc, pipe_config);
                else
                        ilk_get_pfit_config(crtc, pipe_config);
        }

        if (hsw_crtc_supports_ips(crtc)) {
                if (IS_HASWELL(dev_priv))
                        pipe_config->ips_enabled = intel_de_read(dev_priv,
                                                                 IPS_CTL) & IPS_ENABLE;
                else {
                        /*
                         * We cannot readout IPS state on broadwell, set to
                         * true so we can set it to a defined state on first
                         * commit.
                         */
                        pipe_config->ips_enabled = true;
                }
        }

        if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
            !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
                pipe_config->pixel_multiplier =
                        intel_de_read(dev_priv,
                                      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
        } else {
                pipe_config->pixel_multiplier = 1;
        }

        if (INTEL_GEN(dev_priv) >= 11 &&
            !transcoder_is_dsi(pipe_config->cpu_transcoder))
                icl_get_trans_port_sync_config(pipe_config);

out:
        /* Drop every power reference taken above. */
        for_each_power_domain(power_domain, power_domain_mask)
                intel_display_power_put(dev_priv,
                                        power_domain, wakerefs[power_domain]);

        return active;
}
11252
11253 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
11254 {
11255         struct drm_i915_private *dev_priv =
11256                 to_i915(plane_state->uapi.plane->dev);
11257         const struct drm_framebuffer *fb = plane_state->hw.fb;
11258         const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11259         u32 base;
11260
11261         if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
11262                 base = sg_dma_address(obj->mm.pages->sgl);
11263         else
11264                 base = intel_plane_ggtt_offset(plane_state);
11265
11266         return base + plane_state->color_plane[0].offset;
11267 }
11268
11269 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
11270 {
11271         int x = plane_state->uapi.dst.x1;
11272         int y = plane_state->uapi.dst.y1;
11273         u32 pos = 0;
11274
11275         if (x < 0) {
11276                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
11277                 x = -x;
11278         }
11279         pos |= x << CURSOR_X_SHIFT;
11280
11281         if (y < 0) {
11282                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
11283                 y = -y;
11284         }
11285         pos |= y << CURSOR_Y_SHIFT;
11286
11287         return pos;
11288 }
11289
11290 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
11291 {
11292         const struct drm_mode_config *config =
11293                 &plane_state->uapi.plane->dev->mode_config;
11294         int width = drm_rect_width(&plane_state->uapi.dst);
11295         int height = drm_rect_height(&plane_state->uapi.dst);
11296
11297         return width > 0 && width <= config->cursor_width &&
11298                 height > 0 && height <= config->cursor_height;
11299 }
11300
/*
 * Validate and finalize the cursor plane's surface: pin/compute the
 * GTT mapping, derive the surface offset, reject panning (cursor must
 * start at 0,0 within the fb), and handle 180° rotation on GMCH
 * platforms by offsetting to the last pixel.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->uapi.plane->dev);
        unsigned int rotation = plane_state->hw.rotation;
        int src_x, src_y;
        u32 offset;
        int ret;

        ret = intel_plane_compute_gtt(plane_state);
        if (ret)
                return ret;

        if (!plane_state->uapi.visible)
                return 0;

        src_x = plane_state->uapi.src.x1 >> 16;
        src_y = plane_state->uapi.src.y1 >> 16;

        intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
        offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
                                                    plane_state, 0);

        /* Any residual x/y after alignment means cursor panning. */
        if (src_x != 0 || src_y != 0) {
                drm_dbg_kms(&dev_priv->drm,
                            "Arbitrary cursor panning not supported\n");
                return -EINVAL;
        }

        /*
         * Put the final coordinates back so that the src
         * coordinate checks will see the right values.
         */
        drm_rect_translate_to(&plane_state->uapi.src,
                              src_x << 16, src_y << 16);

        /* ILK+ do this automagically in hardware */
        if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
                const struct drm_framebuffer *fb = plane_state->hw.fb;
                int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
                int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

                /* Point at the last pixel; hw scans out backwards. */
                offset += (src_h * src_w - 1) * fb->format->cpp[0];
        }

        plane_state->color_plane[0].offset = offset;
        plane_state->color_plane[0].x = src_x;
        plane_state->color_plane[0].y = src_y;

        return 0;
}
11352
11353 static int intel_check_cursor(struct intel_crtc_state *crtc_state,
11354                               struct intel_plane_state *plane_state)
11355 {
11356         const struct drm_framebuffer *fb = plane_state->hw.fb;
11357         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
11358         int ret;
11359
11360         if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
11361                 drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
11362                 return -EINVAL;
11363         }
11364
11365         ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
11366                                                   &crtc_state->uapi,
11367                                                   DRM_PLANE_HELPER_NO_SCALING,
11368                                                   DRM_PLANE_HELPER_NO_SCALING,
11369                                                   true, true);
11370         if (ret)
11371                 return ret;
11372
11373         /* Use the unclipped src/dst rectangles, which we program to hw */
11374         plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi);
11375         plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi);
11376
11377         ret = intel_cursor_check_surface(plane_state);
11378         if (ret)
11379                 return ret;
11380
11381         if (!plane_state->uapi.visible)
11382                 return 0;
11383
11384         ret = intel_plane_check_src_coordinates(plane_state);
11385         if (ret)
11386                 return ret;
11387
11388         return 0;
11389 }
11390
/*
 * Maximum cursor surface stride on 845g/865g in bytes; a fixed limit
 * independent of format, modifier and rotation.
 */
static unsigned int
i845_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return 2048;
}
11398
11399 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11400 {
11401         u32 cntl = 0;
11402
11403         if (crtc_state->gamma_enable)
11404                 cntl |= CURSOR_GAMMA_ENABLE;
11405
11406         return cntl;
11407 }
11408
11409 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
11410                            const struct intel_plane_state *plane_state)
11411 {
11412         return CURSOR_ENABLE |
11413                 CURSOR_FORMAT_ARGB |
11414                 CURSOR_STRIDE(plane_state->color_plane[0].stride);
11415 }
11416
11417 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
11418 {
11419         int width = drm_rect_width(&plane_state->uapi.dst);
11420
11421         /*
11422          * 845g/865g are only limited by the width of their cursors,
11423          * the height is arbitrary up to the precision of the register.
11424          */
11425         return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
11426 }
11427
11428 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
11429                              struct intel_plane_state *plane_state)
11430 {
11431         const struct drm_framebuffer *fb = plane_state->hw.fb;
11432         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
11433         int ret;
11434
11435         ret = intel_check_cursor(crtc_state, plane_state);
11436         if (ret)
11437                 return ret;
11438
11439         /* if we want to turn off the cursor ignore width and height */
11440         if (!fb)
11441                 return 0;
11442
11443         /* Check for which cursor types we support */
11444         if (!i845_cursor_size_ok(plane_state)) {
11445                 drm_dbg_kms(&i915->drm,
11446                             "Cursor dimension %dx%d not supported\n",
11447                             drm_rect_width(&plane_state->uapi.dst),
11448                             drm_rect_height(&plane_state->uapi.dst));
11449                 return -EINVAL;
11450         }
11451
11452         WARN_ON(plane_state->uapi.visible &&
11453                 plane_state->color_plane[0].stride != fb->pitches[0]);
11454
11455         switch (fb->pitches[0]) {
11456         case 256:
11457         case 512:
11458         case 1024:
11459         case 2048:
11460                 break;
11461         default:
11462                  drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
11463                              fb->pitches[0]);
11464                 return -EINVAL;
11465         }
11466
11467         plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
11468
11469         return 0;
11470 }
11471
/*
 * Program the i845/i865 cursor registers from a precomputed plane
 * state, or disable the cursor when @plane_state is NULL or invisible
 * (all register values stay zero in that case).
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->uapi.visible) {
		unsigned int width = drm_rect_width(&plane_state->uapi.dst);
		unsigned int height = drm_rect_height(&plane_state->uapi.dst);

		cntl = plane_state->ctl |
			i845_cursor_ctl_crtc(crtc_state);

		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		/* Disable first, reprogram, then re-enable via CURCNTR. */
		intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0);
		intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base);
		intel_de_write_fw(dev_priv, CURSIZE, size);
		intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
		intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl);

		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		/* Only the position changed; a lone CURPOS write suffices. */
		intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11516
/* Disable the i845 cursor: a NULL plane_state makes
 * i845_update_cursor() program all-zero register values.
 */
static void i845_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i845_update_cursor(plane, crtc_state, NULL);
}
11522
/*
 * Read back whether the i845 cursor is enabled in hardware.
 * Returns false when the pipe power domain is not enabled (the
 * register is not accessible); otherwise reports CURSOR_ENABLE.
 * All register accesses here hardcode PIPE_A.
 */
static bool i845_cursor_get_hw_state(struct intel_plane *plane,
				     enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(PIPE_A);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE;

	*pipe = PIPE_A;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
11544
/*
 * Maximum cursor stride on i9xx+ in bytes: the mode_config cursor
 * width limit times 4 (bytes per pixel).
 */
static unsigned int
i9xx_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return plane->base.dev->mode_config.cursor_width * 4;
}
11552
11553 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11554 {
11555         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
11556         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11557         u32 cntl = 0;
11558
11559         if (INTEL_GEN(dev_priv) >= 11)
11560                 return cntl;
11561
11562         if (crtc_state->gamma_enable)
11563                 cntl = MCURSOR_GAMMA_ENABLE;
11564
11565         if (crtc_state->csc_enable)
11566                 cntl |= MCURSOR_PIPE_CSC_ENABLE;
11567
11568         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11569                 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
11570
11571         return cntl;
11572 }
11573
/*
 * Plane-derived bits of the i9xx+ cursor control register: trickle
 * feed disable (gen6/IVB), the cursor mode (which encodes the cursor
 * width) and 180° rotation. Returns 0 for an unsupported width.
 */
static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
			   const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	u32 cntl = 0;

	if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		cntl |= MCURSOR_TRICKLE_FEED_DISABLE;

	/* The cursor mode field encodes the (power-of-two) width. */
	switch (drm_rect_width(&plane_state->uapi.dst)) {
	case 64:
		cntl |= MCURSOR_MODE_64_ARGB_AX;
		break;
	case 128:
		cntl |= MCURSOR_MODE_128_ARGB_AX;
		break;
	case 256:
		cntl |= MCURSOR_MODE_256_ARGB_AX;
		break;
	default:
		MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
		return 0;
	}

	if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
		cntl |= MCURSOR_ROTATE_180;

	return cntl;
}
11604
/*
 * Check cursor dimensions against the i9xx+ constraints: the generic
 * mode_config limits, a power-of-two width (64/128/256), and the
 * square-vs-variable-height rules described below.
 */
static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	int width = drm_rect_width(&plane_state->uapi.dst);
	int height = drm_rect_height(&plane_state->uapi.dst);

	if (!intel_cursor_size_ok(plane_state))
		return false;

	/* Cursor width is limited to a few power-of-two sizes */
	switch (width) {
	case 256:
	case 128:
	case 64:
		break;
	default:
		return false;
	}

	/*
	 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
	 * height from 8 lines up to the cursor width, when the
	 * cursor is not rotated. Everything else requires square
	 * cursors.
	 *
	 * Note: DRM_MODE_ROTATE_0 is a bit flag, so the & below
	 * tests the "no rotation" bit specifically.
	 */
	if (HAS_CUR_FBC(dev_priv) &&
	    plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
		if (height < 8 || height > width)
			return false;
	} else {
		if (height != width)
			return false;
	}

	return true;
}
11642
/*
 * i9xx+ specific cursor plane checks: on top of the common cursor
 * checks, validate dimensions and stride, refuse a known-bad CHV
 * pipe C position, and precompute the cursor control register value.
 */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		drm_dbg(&dev_priv->drm,
			"Cursor dimension %dx%d not supported\n",
			drm_rect_width(&plane_state->uapi.dst),
			drm_rect_height(&plane_state->uapi.dst));
		return -EINVAL;
	}

	WARN_ON(plane_state->uapi.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	/* The fb stride must match the cursor width exactly. */
	if (fb->pitches[0] !=
	    drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
		drm_dbg_kms(&dev_priv->drm,
			    "Invalid cursor stride (%u) (cursor width %d)\n",
			    fb->pitches[0],
			    drm_rect_width(&plane_state->uapi.dst));
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}
11702
/*
 * Program the i9xx+ cursor registers from a precomputed plane state,
 * or disable the cursor when @plane_state is NULL or invisible. The
 * register write order below is load-bearing; see the comment inside.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->uapi.visible) {
		unsigned width = drm_rect_width(&plane_state->uapi.dst);
		unsigned height = drm_rect_height(&plane_state->uapi.dst);

		cntl = plane_state->ctl |
			i9xx_cursor_ctl_crtc(crtc_state);

		/* Non-square cursors need CUR_FBC_CTL to set the height. */
		if (width != height)
			fbc_ctl = CUR_FBC_CTL_EN | (height - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
	 * Without the CURCNTR write the CURPOS write would
	 * arm itself. Thus we always update CURCNTR before
	 * CURPOS.
	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
	 * a write to any of the cursor register will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * The other registers are armed by the CURBASE write
	 * except when the plane is getting enabled at which time
	 * the CURCNTR write arms the update.
	 */

	if (INTEL_GEN(dev_priv) >= 9)
		skl_write_cursor_wm(plane, crtc_state);

	if (plane->cursor.base != base ||
	    plane->cursor.size != fbc_ctl ||
	    plane->cursor.cntl != cntl) {
		if (HAS_CUR_FBC(dev_priv))
			intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe),
					  fbc_ctl);
		intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
		intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
		intel_de_write_fw(dev_priv, CURBASE(pipe), base);

		plane->cursor.base = base;
		plane->cursor.size = fbc_ctl;
		plane->cursor.cntl = cntl;
	} else {
		/* Position-only update; CURBASE arms it (see above). */
		intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
		intel_de_write_fw(dev_priv, CURBASE(pipe), base);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11771
/* Disable the i9xx cursor: a NULL plane_state makes
 * i9xx_update_cursor() program all-zero register values.
 */
static void i9xx_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i9xx_update_cursor(plane, crtc_state, NULL);
}
11777
/*
 * Read back whether an i9xx+ cursor plane is enabled in hardware and
 * which pipe it is attached to. Returns false when the pipe power
 * domain is off (registers not accessible).
 */
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
				     enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-3 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, CURCNTR(plane->pipe));

	/* Any non-zero cursor mode counts as enabled. */
	ret = val & MCURSOR_MODE;

	/* Older parts report the pipe via a select field in CURCNTR. */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		*pipe = plane->pipe;
	else
		*pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
			MCURSOR_PIPE_SELECT_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
11811
/*
 * VESA 640x480x72Hz mode to set on the pipe for load detection
 * (committed by intel_get_load_detect_pipe() below).
 */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
11817
11818 struct drm_framebuffer *
11819 intel_framebuffer_create(struct drm_i915_gem_object *obj,
11820                          struct drm_mode_fb_cmd2 *mode_cmd)
11821 {
11822         struct intel_framebuffer *intel_fb;
11823         int ret;
11824
11825         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
11826         if (!intel_fb)
11827                 return ERR_PTR(-ENOMEM);
11828
11829         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
11830         if (ret)
11831                 goto err;
11832
11833         return &intel_fb->base;
11834
11835 err:
11836         kfree(intel_fb);
11837         return ERR_PTR(ret);
11838 }
11839
/*
 * Add all planes currently attached to @crtc to @state and detach
 * them (no CRTC, no framebuffer), so that a subsequent commit of
 * @state scans out nothing on that CRTC. Used by the load-detect
 * path below.
 */
static int intel_modeset_disable_planes(struct drm_atomic_state *state,
					struct drm_crtc *crtc)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int ret, i;

	ret = drm_atomic_add_affected_planes(state, crtc);
	if (ret)
		return ret;

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		/* Only touch planes assigned to the target CRTC. */
		if (plane_state->crtc != crtc)
			continue;

		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
		if (ret)
			return ret;

		drm_atomic_set_fb_for_plane(plane_state, NULL);
	}

	return 0;
}
11864
/*
 * Set up a pipe scanning out the VESA 640x480 load_detect_mode on
 * @connector so the encoder can probe for a sink. The pre-existing
 * atomic state is duplicated into @old->restore_state so that
 * intel_release_load_detect_pipe() can undo the change later.
 *
 * NOTE(review): despite the int return type, this returns true/false
 * (pipe acquired / not acquired) and passes through -EDEADLK for
 * modeset-lock backoff; callers must handle all three cases — confirm
 * before changing the return convention.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.id, encoder->name);

	old->restore_state = NULL;

	/* Caller must already hold the connection_mutex (via @ctx). */
	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		/* Skip CRTCs the encoder cannot drive. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Already in use; drop the lock and keep looking. */
		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	/* One state to commit the probe mode, one to restore afterwards. */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->uapi.active = true;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
					   &load_detect_mode);
	if (ret)
		goto fail;

	/* Scan out nothing on the probe pipe (cursor/planes off). */
	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/* Snapshot the current connector/crtc/plane state for restore. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to create a copy of old state to restore: %i\n",
			    ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* Propagate deadlock so the caller can back off and retry. */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
12023
/*
 * Undo intel_get_load_detect_pipe(): commit the atomic state saved in
 * @old->restore_state, restoring the pre-load-detect configuration.
 * No-op when no state was saved.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(to_intel_connector(connector));
	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_atomic_state *state = old->restore_state;
	int ret;

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		    connector->base.id, connector->name,
		    encoder->base.id, encoder->name);

	if (!state)
		return;

	/* Best effort: a failed restore is only logged. */
	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
	if (ret)
		drm_dbg_kms(&i915->drm,
			    "Couldn't release load detect pipe: %i\n", ret);
	drm_atomic_state_put(state);
}
12048
12049 static int i9xx_pll_refclk(struct drm_device *dev,
12050                            const struct intel_crtc_state *pipe_config)
12051 {
12052         struct drm_i915_private *dev_priv = to_i915(dev);
12053         u32 dpll = pipe_config->dpll_hw_state.dpll;
12054
12055         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
12056                 return dev_priv->vbt.lvds_ssc_freq;
12057         else if (HAS_PCH_SPLIT(dev_priv))
12058                 return 120000;
12059         else if (!IS_GEN(dev_priv, 2))
12060                 return 96000;
12061         else
12062                 return 48000;
12063 }
12064
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick whichever FP register the DPLL is currently using. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Decode the M/N dividers; Pineview uses a different layout. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN(dev_priv, 2)) {
		/* P1 is stored as a set bit, hence the ffs(). */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL operating mode. */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			drm_dbg_kms(&dev_priv->drm,
				    "Unknown DPLL mode %08x in programmed "
				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2: LVDS (pipe B only, not on i830) changes the decode. */
		u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
								 LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
12156
12157 int intel_dotclock_calculate(int link_freq,
12158                              const struct intel_link_m_n *m_n)
12159 {
12160         /*
12161          * The calculation for the data clock is:
12162          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
12163          * But we want to avoid losing precison if possible, so:
12164          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
12165          *
12166          * and the link clock is simpler:
12167          * link_clock = (m * link_clock) / n
12168          */
12169
12170         if (!m_n->link_n)
12171                 return 0;
12172
12173         return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
12174 }
12175
/*
 * Read out the clock state for a PCH-connected pipe: the raw DPLL
 * port clock plus a dotclock derived from the FDI M/N configuration.
 */
static void ilk_pch_clock_get(struct intel_crtc *crtc,
			      struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->hw.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}
12193
12194 static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
12195                                    struct intel_crtc *crtc)
12196 {
12197         memset(crtc_state, 0, sizeof(*crtc_state));
12198
12199         __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
12200
12201         crtc_state->cpu_transcoder = INVALID_TRANSCODER;
12202         crtc_state->master_transcoder = INVALID_TRANSCODER;
12203         crtc_state->hsw_workaround_pipe = INVALID_PIPE;
12204         crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
12205         crtc_state->scaler_state.scaler_id = -1;
12206         crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
12207 }
12208
12209 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
12210 {
12211         struct intel_crtc_state *crtc_state;
12212
12213         crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
12214
12215         if (crtc_state)
12216                 intel_crtc_state_reset(crtc_state, crtc);
12217
12218         return crtc_state;
12219 }
12220
12221 /* Returns the currently programmed mode of the given encoder. */
12222 struct drm_display_mode *
12223 intel_encoder_current_mode(struct intel_encoder *encoder)
12224 {
12225         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
12226         struct intel_crtc_state *crtc_state;
12227         struct drm_display_mode *mode;
12228         struct intel_crtc *crtc;
12229         enum pipe pipe;
12230
12231         if (!encoder->get_hw_state(encoder, &pipe))
12232                 return NULL;
12233
12234         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12235
12236         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
12237         if (!mode)
12238                 return NULL;
12239
12240         crtc_state = intel_crtc_state_alloc(crtc);
12241         if (!crtc_state) {
12242                 kfree(mode);
12243                 return NULL;
12244         }
12245
12246         if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
12247                 kfree(crtc_state);
12248                 kfree(mode);
12249                 return NULL;
12250         }
12251
12252         encoder->get_config(encoder, crtc_state);
12253
12254         intel_mode_from_pipe_config(mode, crtc_state);
12255
12256         kfree(crtc_state);
12257
12258         return mode;
12259 }
12260
12261 static void intel_crtc_destroy(struct drm_crtc *crtc)
12262 {
12263         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12264
12265         drm_crtc_cleanup(crtc);
12266         kfree(intel_crtc);
12267 }
12268
12269 /**
12270  * intel_wm_need_update - Check whether watermarks need updating
12271  * @cur: current plane state
12272  * @new: new plane state
12273  *
12274  * Check current plane state versus the new one to determine whether
12275  * watermarks need to be recalculated.
12276  *
12277  * Returns true or false.
12278  */
12279 static bool intel_wm_need_update(const struct intel_plane_state *cur,
12280                                  struct intel_plane_state *new)
12281 {
12282         /* Update watermarks on tiling or size changes. */
12283         if (new->uapi.visible != cur->uapi.visible)
12284                 return true;
12285
12286         if (!cur->hw.fb || !new->hw.fb)
12287                 return false;
12288
12289         if (cur->hw.fb->modifier != new->hw.fb->modifier ||
12290             cur->hw.rotation != new->hw.rotation ||
12291             drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
12292             drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
12293             drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
12294             drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
12295                 return true;
12296
12297         return false;
12298 }
12299
12300 static bool needs_scaling(const struct intel_plane_state *state)
12301 {
12302         int src_w = drm_rect_width(&state->uapi.src) >> 16;
12303         int src_h = drm_rect_height(&state->uapi.src) >> 16;
12304         int dst_w = drm_rect_width(&state->uapi.dst);
12305         int dst_h = drm_rect_height(&state->uapi.dst);
12306
12307         return (src_w != dst_w || src_h != dst_h);
12308 }
12309
/**
 * intel_plane_atomic_calc_changes - derive crtc state from a plane update
 * @old_crtc_state: crtc state before the update
 * @crtc_state: new crtc state being computed (updated in place)
 * @old_plane_state: plane state before the update
 * @plane_state: new plane state
 *
 * Computes visibility transitions (turn on/off) for the plane and sets
 * the derived crtc state bits that depend on them: watermark update
 * flags, cxsr/LP watermark disables, frontbuffer bits and (gen9+)
 * plane scaler state.
 *
 * Returns 0 on success or a negative error code from scaler setup.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	/* Gen9+ non-cursor planes go through the skl plane scaler. */
	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = plane_state->uapi.visible;

	/* A plane cannot have been visible on an inactive crtc. */
	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		plane_state->uapi.visible = visible = false;
		crtc_state->active_planes &= ~BIT(plane->id);
		crtc_state->data_rate[plane->id] = 0;
		crtc_state->min_cdclk[plane->id] = 0;
	}

	/* Invisible before and after: nothing left to derive. */
	if (!was_visible && !visible)
		return 0;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	drm_dbg_atomic(&dev_priv->drm,
		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
		       crtc->base.base.id, crtc->base.name,
		       plane->base.base.id, plane->base.name,
		       was_visible, visible,
		       turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			crtc_state->update_wm_pre = true;
			crtc_state->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(plane_state))))
		crtc_state->disable_lp_wm = true;

	return 0;
}
12433
12434 static bool encoders_cloneable(const struct intel_encoder *a,
12435                                const struct intel_encoder *b)
12436 {
12437         /* masks could be asymmetric, so check both ways */
12438         return a == b || (a->cloneable & (1 << b->type) &&
12439                           b->cloneable & (1 << a->type));
12440 }
12441
/*
 * Check whether @encoder can be cloned with every other encoder the new
 * state attaches to @crtc. Returns true when all such pairings are
 * cloneable (trivially true if no other connector uses the crtc).
 */
static bool check_single_encoder_cloning(struct drm_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		/* Only connectors routed to this crtc matter. */
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}
12463
/*
 * Pull the linked (Y/slave) plane of every planar plane in @state into
 * the atomic state as well, so that both halves of a planar pair are
 * always checked and committed together.
 *
 * Returns 0 on success or a negative error code.
 */
static int icl_add_linked_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state, *linked_plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		linked = plane_state->planar_linked_plane;

		if (!linked)
			continue;

		linked_plane_state = intel_atomic_get_plane_state(state, linked);
		if (IS_ERR(linked_plane_state))
			return PTR_ERR(linked_plane_state);

		/* Links must be mutual, with exactly one side the slave. */
		WARN_ON(linked_plane_state->planar_linked_plane != plane);
		WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave);
	}

	return 0;
}
12486
/*
 * On gen11+, each visible NV12 (planar YUV) plane needs a second
 * hardware plane to scan out the Y component. Tear down any stale
 * plane links on this crtc, then pair every NV12 plane with a free
 * Y-capable plane and copy the relevant parameters to the slave.
 *
 * Returns 0 on success, -EINVAL if no free Y plane is available, or
 * another negative error code.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->planar_slave = false;
	}

	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find a currently unused Y-capable plane on this pipe. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			drm_dbg_kms(&dev_priv->drm,
				    "Need %d free Y planes for planar YUV\n",
				    hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
			    linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;
		memcpy(linked_state->color_plane, plane_state->color_plane,
		       sizeof(linked_state->color_plane));

		intel_plane_copy_uapi_to_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}
12580
12581 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
12582 {
12583         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
12584         struct intel_atomic_state *state =
12585                 to_intel_atomic_state(new_crtc_state->uapi.state);
12586         const struct intel_crtc_state *old_crtc_state =
12587                 intel_atomic_get_old_crtc_state(state, crtc);
12588
12589         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
12590 }
12591
/*
 * Return true if any connector attached to this crtc in the new state
 * is the last-horizontal, last-vertical tile of a tiled display, i.e.
 * the connector that acts as the port sync master.
 */
static bool
intel_atomic_is_master_connector(struct intel_crtc_state *crtc_state)
{
	struct drm_crtc *crtc = crtc_state->uapi.crtc;
	struct drm_atomic_state *state = crtc_state->uapi.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;
		if (connector->has_tile &&
		    connector->tile_h_loc == connector->num_h_tile - 1 &&
		    connector->tile_v_loc == connector->num_v_tile - 1)
			return true;
	}

	return false;
}
12612
12613 static void reset_port_sync_mode_state(struct intel_crtc_state *crtc_state)
12614 {
12615         crtc_state->master_transcoder = INVALID_TRANSCODER;
12616         crtc_state->sync_mode_slaves_mask = 0;
12617 }
12618
/*
 * For gen11+ tiled DP displays, link a slave crtc to its master
 * transcoder: find the master connector (last horizontal/vertical
 * tile) in @connector's tile group, record its transcoder in
 * @crtc_state and add our transcoder to the master's slave mask.
 *
 * Returns 0 on success or a negative error code.
 */
static int icl_compute_port_sync_crtc_state(struct drm_connector *connector,
					    struct intel_crtc_state *crtc_state,
					    int num_tiled_conns)
{
	struct drm_crtc *crtc = crtc_state->uapi.crtc;
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	struct drm_connector *master_connector;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc *master_crtc = NULL;
	struct drm_crtc_state *master_crtc_state;
	struct intel_crtc_state *master_pipe_config;

	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
		return 0;

	/*
	 * In case of tiled displays there could be one or more slaves but there is
	 * only one master. Lets make the CRTC used by the connector corresponding
	 * to the last horizonal and last vertical tile a master/genlock CRTC.
	 * All the other CRTCs corresponding to other tiles of the same Tile group
	 * are the slave CRTCs and hold a pointer to their genlock CRTC.
	 * If all tiles not present do not make master slave assignments.
	 */
	if (!connector->has_tile ||
	    crtc_state->hw.mode.hdisplay != connector->tile_h_size ||
	    crtc_state->hw.mode.vdisplay != connector->tile_v_size ||
	    num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
		reset_port_sync_mode_state(crtc_state);
		return 0;
	}
	/* Last Horizontal and last vertical tile connector is a master
	 * Master's crtc state is already populated in slave for port sync
	 */
	if (connector->tile_h_loc == connector->num_h_tile - 1 &&
	    connector->tile_v_loc == connector->num_v_tile - 1)
		return 0;

	/* Loop through all connectors and configure the Slave crtc_state
	 * to point to the correct master.
	 */
	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(master_connector, &conn_iter) {
		struct drm_connector_state *master_conn_state = NULL;

		/* Only consider the master tile of our own tile group. */
		if (!(master_connector->has_tile &&
		      master_connector->tile_group->id == connector->tile_group->id))
			continue;
		if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
		    master_connector->tile_v_loc != master_connector->num_v_tile - 1)
			continue;

		master_conn_state = drm_atomic_get_connector_state(&state->base,
								   master_connector);
		if (IS_ERR(master_conn_state)) {
			drm_connector_list_iter_end(&conn_iter);
			return PTR_ERR(master_conn_state);
		}
		if (master_conn_state->crtc) {
			master_crtc = master_conn_state->crtc;
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	if (!master_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not find Master CRTC for Slave CRTC %d\n",
			    crtc->base.id);
		return -EINVAL;
	}

	master_crtc_state = drm_atomic_get_crtc_state(&state->base,
						      master_crtc);
	if (IS_ERR(master_crtc_state))
		return PTR_ERR(master_crtc_state);

	/* Record the master/slave relationship in both crtc states. */
	master_pipe_config = to_intel_crtc_state(master_crtc_state);
	crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
	master_pipe_config->sync_mode_slaves_mask |=
		BIT(crtc_state->cpu_transcoder);
	drm_dbg_kms(&dev_priv->drm,
		    "Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
		    transcoder_name(crtc_state->master_transcoder),
		    crtc->base.id,
		    master_pipe_config->sync_mode_slaves_mask);

	return 0;
}
12711
12712 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
12713 {
12714         const struct drm_display_mode *adjusted_mode =
12715                 &crtc_state->hw.adjusted_mode;
12716
12717         if (!crtc_state->hw.enable)
12718                 return 0;
12719
12720         return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
12721                                  adjusted_mode->crtc_clock);
12722 }
12723
12724 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
12725                                const struct intel_cdclk_state *cdclk_state)
12726 {
12727         const struct drm_display_mode *adjusted_mode =
12728                 &crtc_state->hw.adjusted_mode;
12729
12730         if (!crtc_state->hw.enable)
12731                 return 0;
12732
12733         return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
12734                                  cdclk_state->logical.cdclk);
12735 }
12736
12737 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
12738 {
12739         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12740         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12741         const struct drm_display_mode *adjusted_mode =
12742                 &crtc_state->hw.adjusted_mode;
12743         u16 linetime_wm;
12744
12745         if (!crtc_state->hw.enable)
12746                 return 0;
12747
12748         linetime_wm = DIV_ROUND_UP(adjusted_mode->crtc_htotal * 1000 * 8,
12749                                    crtc_state->pixel_rate);
12750
12751         /* Display WA #1135: BXT:ALL GLK:ALL */
12752         if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
12753                 linetime_wm /= 2;
12754
12755         return linetime_wm;
12756 }
12757
12758 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
12759                                    struct intel_crtc *crtc)
12760 {
12761         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12762         struct intel_crtc_state *crtc_state =
12763                 intel_atomic_get_new_crtc_state(state, crtc);
12764         const struct intel_cdclk_state *cdclk_state;
12765
12766         if (INTEL_GEN(dev_priv) >= 9)
12767                 crtc_state->linetime = skl_linetime_wm(crtc_state);
12768         else
12769                 crtc_state->linetime = hsw_linetime_wm(crtc_state);
12770
12771         if (!hsw_crtc_supports_ips(crtc))
12772                 return 0;
12773
12774         cdclk_state = intel_atomic_get_cdclk_state(state);
12775         if (IS_ERR(cdclk_state))
12776                 return PTR_ERR(cdclk_state);
12777
12778         crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
12779                                                        cdclk_state);
12780
12781         return 0;
12782 }
12783
/*
 * Per-crtc atomic check: compute clocks, color management, watermarks,
 * scalers, IPS and linetime state for the new crtc state.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = needs_modeset(crtc_state);
	int ret;

	/* Pre-ilk (non-G4X): turning the crtc off needs a post watermark update. */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	if (mode_changed && crtc_state->hw.enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(crtc_state->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(crtc_state);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm) {
		/* Intermediate watermarks require pipe watermarks first. */
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe) {
			ret = skl_update_scaler_crtc(crtc_state);
			if (ret)
				return ret;
		}

		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
		if (ret)
			return ret;
	}

	if (HAS_IPS(dev_priv)) {
		ret = hsw_compute_ips_config(crtc_state);
		if (ret)
			return ret;
	}

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		ret = hsw_compute_linetime_wm(state, crtc);
		if (ret)
			return ret;

	}

	return 0;
}
12873
12874 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
12875 {
12876         struct intel_connector *connector;
12877         struct drm_connector_list_iter conn_iter;
12878
12879         drm_connector_list_iter_begin(dev, &conn_iter);
12880         for_each_intel_connector_iter(connector, &conn_iter) {
12881                 if (connector->base.state->crtc)
12882                         drm_connector_put(&connector->base);
12883
12884                 if (connector->base.encoder) {
12885                         connector->base.state->best_encoder =
12886                                 connector->base.encoder;
12887                         connector->base.state->crtc =
12888                                 connector->base.encoder->crtc;
12889
12890                         drm_connector_get(&connector->base);
12891                 } else {
12892                         connector->base.state->best_encoder = NULL;
12893                         connector->base.state->crtc = NULL;
12894                 }
12895         }
12896         drm_connector_list_iter_end(&conn_iter);
12897 }
12898
12899 static int
12900 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
12901                       struct intel_crtc_state *pipe_config)
12902 {
12903         struct drm_connector *connector = conn_state->connector;
12904         struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
12905         const struct drm_display_info *info = &connector->display_info;
12906         int bpp;
12907
12908         switch (conn_state->max_bpc) {
12909         case 6 ... 7:
12910                 bpp = 6 * 3;
12911                 break;
12912         case 8 ... 9:
12913                 bpp = 8 * 3;
12914                 break;
12915         case 10 ... 11:
12916                 bpp = 10 * 3;
12917                 break;
12918         case 12:
12919                 bpp = 12 * 3;
12920                 break;
12921         default:
12922                 return -EINVAL;
12923         }
12924
12925         if (bpp < pipe_config->pipe_bpp) {
12926                 drm_dbg_kms(&i915->drm,
12927                             "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
12928                             "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
12929                             connector->base.id, connector->name,
12930                             bpp, 3 * info->bpc,
12931                             3 * conn_state->max_requested_bpc,
12932                             pipe_config->pipe_bpp);
12933
12934                 pipe_config->pipe_bpp = bpp;
12935         }
12936
12937         return 0;
12938 }
12939
12940 static int
12941 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12942                           struct intel_crtc_state *pipe_config)
12943 {
12944         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12945         struct drm_atomic_state *state = pipe_config->uapi.state;
12946         struct drm_connector *connector;
12947         struct drm_connector_state *connector_state;
12948         int bpp, i;
12949
12950         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
12951             IS_CHERRYVIEW(dev_priv)))
12952                 bpp = 10*3;
12953         else if (INTEL_GEN(dev_priv) >= 5)
12954                 bpp = 12*3;
12955         else
12956                 bpp = 8*3;
12957
12958         pipe_config->pipe_bpp = bpp;
12959
12960         /* Clamp display bpp to connector max bpp */
12961         for_each_new_connector_in_state(state, connector, connector_state, i) {
12962                 int ret;
12963
12964                 if (connector_state->crtc != &crtc->base)
12965                         continue;
12966
12967                 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
12968                 if (ret)
12969                         return ret;
12970         }
12971
12972         return 0;
12973 }
12974
/*
 * Log the crtc_* (post drm_mode_set_crtcinfo() fixup) timing values of
 * @mode at KMS debug level, in modeline order.
 */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal,
		      mode->type, mode->flags);
}
12986
/*
 * Log one set of link M/N values (@m_n) at KMS debug level, tagged with
 * @id (e.g. "fdi", "dp m_n") and the link's @lane_count.
 */
static inline void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);

	drm_dbg_kms(&i915->drm,
		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		    id, lane_count,
		    m_n->gmch_m, m_n->gmch_n,
		    m_n->link_m, m_n->link_n, m_n->tu);
}
13000
13001 static void
13002 intel_dump_infoframe(struct drm_i915_private *dev_priv,
13003                      const union hdmi_infoframe *frame)
13004 {
13005         if (!drm_debug_enabled(DRM_UT_KMS))
13006                 return;
13007
13008         hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
13009 }
13010
/* Expands to a designated initializer, e.g. [INTEL_OUTPUT_HDMI] = "HDMI". */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/* Human-readable output type names, indexed by INTEL_OUTPUT_* value. */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
13029
/*
 * Format the set bits of @output_types into @buf as a comma separated
 * list of output type names (e.g. "DP,HDMI"), silently truncating once
 * @len is exhausted.
 */
static void snprintf_output_types(char *buf, size_t len,
				  unsigned int output_types)
{
	char *str = buf;
	int i;

	str[0] = '\0';

	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
		int r;

		if ((output_types & BIT(i)) == 0)
			continue;

		/* prefix a comma for every entry but the first */
		r = snprintf(str, len, "%s%s",
			     str != buf ? "," : "", output_type_str[i]);
		/* snprintf() returns the would-be length: >= len means truncated */
		if (r >= len)
			break;
		str += r;
		len -= r;

		output_types &= ~BIT(i);
	}

	/* any bit still set has no name in output_type_str[] (or got truncated) */
	WARN_ON_ONCE(output_types != 0);
}
13056
/* Human-readable output format names, indexed by enum intel_output_format. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
13063
13064 static const char *output_formats(enum intel_output_format format)
13065 {
13066         if (format >= ARRAY_SIZE(output_format_str))
13067                 format = INTEL_OUTPUT_FORMAT_INVALID;
13068         return output_format_str[format];
13069 }
13070
/*
 * Log one plane's state at KMS debug level: fb id/size/format,
 * visibility, rotation, scaler id and (when visible) the src/dst
 * rectangles.
 */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_format_name_buf format_name;

	/* a plane without a framebuffer gets a one-line [NOFB] dump */
	if (!fb) {
		drm_dbg_kms(&i915->drm,
			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			    plane->base.base.id, plane->base.name,
			    yesno(plane_state->uapi.visible));
		return;
	}

	drm_dbg_kms(&i915->drm,
		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
		    plane->base.base.id, plane->base.name,
		    fb->base.id, fb->width, fb->height,
		    drm_get_format_name(fb->format->format, &format_name),
		    yesno(plane_state->uapi.visible));
	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
		    plane_state->hw.rotation, plane_state->scaler_id);
	if (plane_state->uapi.visible)
		drm_dbg_kms(&i915->drm,
			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
			    DRM_RECT_ARG(&plane_state->uapi.dst));
}
13100
/*
 * Dump the full crtc state at KMS debug level: output types/format,
 * transcoder, bpp, link M/N values, infoframes, modes/timings, scaler,
 * panel fitter, DPLL and color management state, followed by the state
 * of every plane on this crtc found in @state. @context is a free-form
 * tag identifying the caller. For a disabled crtc only the enable line
 * and the planes are dumped. @state may be NULL, in which case the
 * plane dump is skipped.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
		    crtc->base.base.id, crtc->base.name,
		    yesno(pipe_config->hw.enable), context);

	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	drm_dbg_kms(&dev_priv->drm,
		    "active: %s, output_types: %s (0x%x), output format: %s\n",
		    yesno(pipe_config->hw.active),
		    buf, pipe_config->output_types,
		    output_formats(pipe_config->output_format));

	drm_dbg_kms(&dev_priv->drm,
		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		    transcoder_name(pipe_config->cpu_transcoder),
		    pipe_config->pipe_bpp, pipe_config->dither);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* m2_n2 is only valid when DRRS is possible */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		    pipe_config->has_audio, pipe_config->has_infoframe,
		    pipe_config->infoframes.enable);

	/* dump each infoframe only if it is enabled in the state */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
			    pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);

	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
	drm_dbg_kms(&dev_priv->drm,
		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		    pipe_config->port_clock,
		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		    pipe_config->pixel_rate);

	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
		    pipe_config->linetime, pipe_config->ips_linetime);

	/* pipe scalers exist on gen9+ only */
	if (INTEL_GEN(dev_priv) >= 9)
		drm_dbg_kms(&dev_priv->drm,
			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			    crtc->num_scalers,
			    pipe_config->scaler_state.scaler_users,
			    pipe_config->scaler_state.scaler_id);

	/* GMCH platforms have the gmch pfit, everything else the pch pfit */
	if (HAS_GMCH(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			    pipe_config->gmch_pfit.control,
			    pipe_config->gmch_pfit.pgm_ratios,
			    pipe_config->gmch_pfit.lvds_border_bits);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
			    pipe_config->pch_pfit.pos,
			    pipe_config->pch_pfit.size,
			    enableddisabled(pipe_config->pch_pfit.enabled),
			    yesno(pipe_config->pch_pfit.force_thru));

	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
		    pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	/* CHV uses cgm_mode where other platforms use csc_mode */
	if (IS_CHERRYVIEW(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->cgm_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->csc_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);

	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
		    transcoder_name(pipe_config->mst_master_transcoder));

dump_planes:
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
13227
/*
 * Check that no digital port is used by more than one SST/HDMI encoder
 * and that MST and SST/HDMI streams are not mixed on the same port.
 * Returns true if the configuration is conflict free.
 */
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * We're going to peek into connector->state,
	 * hence connection_mutex must be held.
	 */
	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* prefer the state in @state, fall back to the current state */
		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_DDI:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* else, fall through */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << encoder->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				ret = false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
13299
/*
 * Sync the parts of the hw crtc state that may change without a full
 * modeset from the uapi state; currently just the color management
 * blobs (see intel_crtc_copy_color_blobs()).
 */
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state)
{
	intel_crtc_copy_color_blobs(crtc_state);
}
13305
/*
 * Sync the full hw crtc state (enable/active flags and both modes)
 * from the uapi state, including the nomodeset parts (color blobs).
 */
static void
intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->hw.enable = crtc_state->uapi.enable;
	crtc_state->hw.active = crtc_state->uapi.active;
	crtc_state->hw.mode = crtc_state->uapi.mode;
	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
	intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state);
}
13315
/*
 * Sync the uapi crtc state back from the hw state: enable/active, the
 * modes and the color management blobs. The mode is installed via
 * drm_atomic_set_mode_for_crtc() so the uapi mode blob stays in sync;
 * that call only fails on allocation errors, hence the WARN_ON.
 */
static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
{
	crtc_state->uapi.enable = crtc_state->hw.enable;
	crtc_state->uapi.active = crtc_state->hw.active;
	WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);

	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;

	/* copy color blobs to uapi */
	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
				  crtc_state->hw.degamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
				  crtc_state->hw.gamma_lut);
	drm_property_replace_blob(&crtc_state->uapi.ctm,
				  crtc_state->hw.ctm);
}
13332
/*
 * Reset @crtc_state to a freshly-allocated ("cleared") state before a
 * modeset recompute, preserving only the fields listed below (uapi
 * state, scaler state, DPLL selection/hw state, CRC enable, watermarks
 * on GMCH-style platforms, and port sync bookkeeping). The hw members
 * are then re-derived from the preserved uapi state.
 * Returns 0 on success or -ENOMEM.
 */
static int
intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* wm state is recomputed elsewhere except on these platforms */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;
	/*
	 * Save the slave bitmask which gets filled for master crtc state during
	 * slave atomic check call. For all other CRTCs reset the port sync variables
	 * crtc_state->master_transcoder needs to be set to INVALID
	 */
	reset_port_sync_mode_state(saved_state);
	if (intel_atomic_is_master_connector(crtc_state))
		saved_state->sync_mode_slaves_mask =
			crtc_state->sync_mode_slaves_mask;

	/* overwrite the whole state with the cleared + preserved copy */
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(crtc_state);

	return 0;
}
13379
/*
 * Compute the full crtc state for a modeset: sanitize sync polarity
 * flags, pick a baseline pipe bpp, derive the pipe source size, then
 * let every encoder on the crtc adjust the state through its
 * .compute_config() hook, retrying at most once if the crtc compute
 * step reports a bandwidth constraint (RETRY).
 * Returns 0 on success or a negative error code.
 */
static int
intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret;
	int i, tile_group_id = -1, num_tiled_conns = 0;
	bool retry = true;

	/* start with the transcoder that matches the pipe */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	/* remember the pre-clamp bpp for the debug message at the end */
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			drm_dbg_kms(&i915->drm,
				    "rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Get tile_group_id of tiled connector */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc == crtc &&
		    connector->has_tile) {
			tile_group_id = connector->tile_group->id;
			break;
		}
	}

	/* Get total number of tiled connectors in state that belong to
	 * this tile group.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector->has_tile &&
		    connector->tile_group->id == tile_group_id)
			num_tiled_conns++;
	}

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		ret = icl_compute_port_sync_crtc_state(connector, pipe_config,
						       num_tiled_conns);
		if (ret) {
			drm_dbg_kms(&i915->drm,
				    "Cannot assign Sync Mode CRTCs: %d\n",
				    ret);
			return ret;
		}

		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* -EDEADLK means modeset lock backoff, not an error */
			if (ret != -EDEADLK)
				drm_dbg_kms(&i915->drm,
					    "Encoder config failure: %d\n",
					    ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
		return ret;
	}

	/* RETRY: bandwidth constrained; rerun the encoder hooks at most once */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n"))
			return -EINVAL;

		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	drm_dbg_kms(&i915->drm,
		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	/*
	 * Make drm_calc_timestamping_constants in
	 * drm_atomic_helper_update_legacy_modeset_state() happy
	 */
	pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;

	return 0;
}
13551
/*
 * Compare two clock values (kHz) and accept them as "equal" if their
 * relative difference stays under roughly 5%. A zero clock only matches
 * another zero clock.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);

	/* ((delta + sum) * 100) / sum < 105  <=>  delta < 5% of sum */
	return ((delta + clock1 + clock2) * 100) / (clock1 + clock2) < 105;
}
13569
13570 static bool
13571 intel_compare_m_n(unsigned int m, unsigned int n,
13572                   unsigned int m2, unsigned int n2,
13573                   bool exact)
13574 {
13575         if (m == m2 && n == n2)
13576                 return true;
13577
13578         if (exact || !m || !n || !m2 || !n2)
13579                 return false;
13580
13581         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
13582
13583         if (n > n2) {
13584                 while (n > n2) {
13585                         m2 <<= 1;
13586                         n2 <<= 1;
13587                 }
13588         } else if (n < n2) {
13589                 while (n < n2) {
13590                         m <<= 1;
13591                         n <<= 1;
13592                 }
13593         }
13594
13595         if (n != n2)
13596                 return false;
13597
13598         return intel_fuzzy_clock_check(m, m2);
13599 }
13600
13601 static bool
13602 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
13603                        const struct intel_link_m_n *m2_n2,
13604                        bool exact)
13605 {
13606         return m_n->tu == m2_n2->tu &&
13607                 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
13608                                   m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
13609                 intel_compare_m_n(m_n->link_m, m_n->link_n,
13610                                   m2_n2->link_m, m2_n2->link_n, exact);
13611 }
13612
13613 static bool
13614 intel_compare_infoframe(const union hdmi_infoframe *a,
13615                         const union hdmi_infoframe *b)
13616 {
13617         return memcmp(a, b, sizeof(*a)) == 0;
13618 }
13619
13620 static void
13621 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
13622                                bool fastset, const char *name,
13623                                const union hdmi_infoframe *a,
13624                                const union hdmi_infoframe *b)
13625 {
13626         if (fastset) {
13627                 if (!drm_debug_enabled(DRM_UT_KMS))
13628                         return;
13629
13630                 drm_dbg_kms(&dev_priv->drm,
13631                             "fastset mismatch in %s infoframe\n", name);
13632                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
13633                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
13634                 drm_dbg_kms(&dev_priv->drm, "found:\n");
13635                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
13636         } else {
13637                 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
13638                 drm_err(&dev_priv->drm, "expected:\n");
13639                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
13640                 drm_err(&dev_priv->drm, "found:\n");
13641                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
13642         }
13643 }
13644
13645 static void __printf(4, 5)
13646 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
13647                      const char *name, const char *format, ...)
13648 {
13649         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
13650         struct va_format vaf;
13651         va_list args;
13652
13653         va_start(args, format);
13654         vaf.fmt = format;
13655         vaf.va = &args;
13656
13657         if (fastset)
13658                 drm_dbg_kms(&i915->drm,
13659                             "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
13660                             crtc->base.base.id, crtc->base.name, name, &vaf);
13661         else
13662                 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
13663                         crtc->base.base.id, crtc->base.name, name, &vaf);
13664
13665         va_end(args);
13666 }
13667
13668 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
13669 {
13670         if (i915_modparams.fastboot != -1)
13671                 return i915_modparams.fastboot;
13672
13673         /* Enable fastboot by default on Skylake and newer */
13674         if (INTEL_GEN(dev_priv) >= 9)
13675                 return true;
13676
13677         /* Enable fastboot by default on VLV and CHV */
13678         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13679                 return true;
13680
13681         /* Disabled by default on all others */
13682         return false;
13683 }
13684
/*
 * Compare the software-computed state in @current_config against the
 * hardware-readout state in @pipe_config, field by field. Returns true
 * when they match. With @fastset the comparison is used to decide
 * whether a full modeset can be skipped (mismatches are logged at
 * debug level); without it any mismatch is reported as an error.
 */
static bool
intel_pipe_config_compare(const struct intel_crtc_state *current_config,
			  const struct intel_crtc_state *pipe_config,
			  bool fastset)
{
	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	bool ret = true;
	u32 bp_gamma = 0;
	/*
	 * True when fastsetting on top of firmware-inherited state that
	 * the new state no longer marks as inherited.
	 */
	bool fixup_inherited = fastset &&
		(current_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
		!(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);

	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm,
			    "initial modeset and fastboot not set\n");
		ret = false;
	}

/*
 * All PIPE_CONF_CHECK_* macros below record a mismatch by logging it
 * and setting 'ret = false'; they never return early so every field
 * gets reported.
 */
#define PIPE_CONF_CHECK_X(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected 0x%08x, found 0x%08x)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_I(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %i, found %i)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_BOOL(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc,  __stringify(name), \
				     "(expected %s, found %s)", \
				     yesno(current_config->name), \
				     yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

/*
 * Checks state where we only read out the enabling, but not the entire
 * state itself (like full infoframes or ELD for audio). These states
 * require a full modeset on bootup to fix up.
 */
#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
		PIPE_CONF_CHECK_BOOL(name); \
	} else { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
				     yesno(current_config->name), \
				     yesno(pipe_config->name)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_P(name) do { \
	if (current_config->name != pipe_config->name) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %p, found %p)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_M_N(name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name,\
				    !fastset)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected tu %i gmch %i/%i link %i/%i, " \
				     "found tu %i, gmch %i/%i link %i/%i)", \
				     current_config->name.tu, \
				     current_config->name.gmch_m, \
				     current_config->name.gmch_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.gmch_m, \
				     pipe_config->name.gmch_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, !fastset) && \
	    !intel_compare_link_m_n(&current_config->alt_name, \
				    &pipe_config->name, !fastset)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected tu %i gmch %i/%i link %i/%i, " \
				     "or tu %i gmch %i/%i link %i/%i, " \
				     "found tu %i, gmch %i/%i link %i/%i)", \
				     current_config->name.tu, \
				     current_config->name.gmch_m, \
				     current_config->name.gmch_n, \
				     current_config->name.link_m, \
				     current_config->name.link_n, \
				     current_config->alt_name.tu, \
				     current_config->alt_name.gmch_m, \
				     current_config->alt_name.gmch_n, \
				     current_config->alt_name.link_m, \
				     current_config->alt_name.link_n, \
				     pipe_config->name.tu, \
				     pipe_config->name.gmch_m, \
				     pipe_config->name.gmch_n, \
				     pipe_config->name.link_m, \
				     pipe_config->name.link_n); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(%x) (expected %i, found %i)", \
				     (mask), \
				     current_config->name & (mask), \
				     pipe_config->name & (mask)); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name), \
				     "(expected %i, found %i)", \
				     current_config->name, \
				     pipe_config->name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
	if (!intel_compare_infoframe(&current_config->infoframes.name, \
				     &pipe_config->infoframes.name)) { \
		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
					       &current_config->infoframes.name, \
					       &pipe_config->infoframes.name); \
		ret = false; \
	} \
} while (0)

#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
	if (current_config->name1 != pipe_config->name1) { \
		pipe_config_mismatch(fastset, crtc, __stringify(name1), \
				"(expected %i, found %i, won't compare lut values)", \
				current_config->name1, \
				pipe_config->name1); \
		ret = false;\
	} else { \
		if (!intel_color_lut_equal(current_config->name2, \
					pipe_config->name2, pipe_config->name1, \
					bit_precision)) { \
			pipe_config_mismatch(fastset, crtc, __stringify(name2), \
					"hw_state doesn't match sw_state"); \
			ret = false; \
		} \
	} \
} while (0)

#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	/* PCH/FDI link configuration */
	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	if (INTEL_GEN(dev_priv) < 8) {
		PIPE_CONF_CHECK_M_N(dp_m_n);

		if (current_config->has_drrs)
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
	} else
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

	PIPE_CONF_CHECK_X(output_types);

	/* Horizontal and vertical timings of the adjusted mode */
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);

	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(pixel_multiplier);
	PIPE_CONF_CHECK_I(output_format);
	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		PIPE_CONF_CHECK_BOOL(limited_color_range);

	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
	PIPE_CONF_CHECK_BOOL(has_infoframe);
	PIPE_CONF_CHECK_BOOL(fec_enable);

	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);

	PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (INTEL_GEN(dev_priv) < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	/*
	 * Changing the EDP transcoder input mux
	 * (A_ONOFF vs. A_ON) requires a full modeset.
	 */
	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);

	/* These fields are only compared on a full modeset check. */
	if (!fastset) {
		PIPE_CONF_CHECK_I(pipe_src_w);
		PIPE_CONF_CHECK_I(pipe_src_h);

		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
		if (current_config->pch_pfit.enabled) {
			PIPE_CONF_CHECK_X(pch_pfit.pos);
			PIPE_CONF_CHECK_X(pch_pfit.size);
		}

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
		PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);

		PIPE_CONF_CHECK_X(gamma_mode);
		if (IS_CHERRYVIEW(dev_priv))
			PIPE_CONF_CHECK_X(cgm_mode);
		else
			PIPE_CONF_CHECK_X(csc_mode);
		PIPE_CONF_CHECK_BOOL(gamma_enable);
		PIPE_CONF_CHECK_BOOL(csc_enable);

		PIPE_CONF_CHECK_I(linetime);
		PIPE_CONF_CHECK_I(ips_linetime);

		/* LUT contents are only comparable at a known precision. */
		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
		if (bp_gamma)
			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
	}

	PIPE_CONF_CHECK_BOOL(double_wide);

	/* Shared DPLL selection and the full PLL register state */
	PIPE_CONF_CHECK_P(shared_dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
	PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
	PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
	PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
	PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
	PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);

	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
	PIPE_CONF_CHECK_X(dsi_pll.div);

	if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

	PIPE_CONF_CHECK_I(min_voltage_level);

	PIPE_CONF_CHECK_X(infoframes.enable);
	PIPE_CONF_CHECK_X(infoframes.gcp);
	PIPE_CONF_CHECK_INFOFRAME(avi);
	PIPE_CONF_CHECK_INFOFRAME(spd);
	PIPE_CONF_CHECK_INFOFRAME(hdmi);
	PIPE_CONF_CHECK_INFOFRAME(drm);

	PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
	PIPE_CONF_CHECK_I(master_transcoder);

	/* Display Stream Compression configuration */
	PIPE_CONF_CHECK_I(dsc.compression_enable);
	PIPE_CONF_CHECK_I(dsc.dsc_split);
	PIPE_CONF_CHECK_I(dsc.compressed_bpp);

	PIPE_CONF_CHECK_I(mst_master_transcoder);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_CHECK_COLOR_LUT
#undef PIPE_CONF_QUIRK

	return ret;
}
14043
14044 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
14045                                            const struct intel_crtc_state *pipe_config)
14046 {
14047         if (pipe_config->has_pch_encoder) {
14048                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
14049                                                             &pipe_config->fdi_m_n);
14050                 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
14051
14052                 /*
14053                  * FDI already provided one idea for the dotclock.
14054                  * Yell if the encoder disagrees.
14055                  */
14056                 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
14057                      "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
14058                      fdi_dotclock, dotclock);
14059         }
14060 }
14061
/*
 * Read back the SKL+ watermark and DDB allocation state from the
 * hardware for @crtc and compare it against the software state in
 * @new_crtc_state, logging a drm_err for every mismatch. No-op on
 * pre-gen9 hardware or when the pipe is not active.
 */
static void verify_wm_state(struct intel_crtc *crtc,
			    struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	/* Scratch buffer for the HW readout; heap-allocated since it is large. */
	struct skl_hw_state {
		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
		struct skl_pipe_wm wm;
	} *hw;
	struct skl_pipe_wm *sw_wm;
	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
	u8 hw_enabled_slices;
	const enum pipe pipe = crtc->pipe;
	int plane, level, max_level = ilk_wm_max_level(dev_priv);

	if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
		return;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return;

	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
	sw_wm = &new_crtc_state->wm.skl.optimal;

	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);

	/* DBUF slice enable mask is only tracked on gen11+. */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    hw_enabled_slices != dev_priv->enabled_dbuf_slices_mask)
		drm_err(&dev_priv->drm,
			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
			dev_priv->enabled_dbuf_slices_mask,
			hw_enabled_slices);

	/* planes */
	for_each_universal_plane(dev_priv, pipe, plane) {
		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

		hw_plane_wm = &hw->wm.planes[plane];
		sw_plane_wm = &sw_wm->planes[plane];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			drm_err(&dev_priv->drm,
				"mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				pipe_name(pipe), plane + 1, level,
				sw_plane_wm->wm[level].plane_en,
				sw_plane_wm->wm[level].plane_res_b,
				sw_plane_wm->wm[level].plane_res_l,
				hw_plane_wm->wm[level].plane_en,
				hw_plane_wm->wm[level].plane_res_b,
				hw_plane_wm->wm[level].plane_res_l);
		}

		/* Transition watermark */
		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			drm_err(&dev_priv->drm,
				"mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				pipe_name(pipe), plane + 1,
				sw_plane_wm->trans_wm.plane_en,
				sw_plane_wm->trans_wm.plane_res_b,
				sw_plane_wm->trans_wm.plane_res_l,
				hw_plane_wm->trans_wm.plane_en,
				hw_plane_wm->trans_wm.plane_res_b,
				hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb_y[plane];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			drm_err(&dev_priv->drm,
				"mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
				pipe_name(pipe), plane + 1,
				sw_ddb_entry->start, sw_ddb_entry->end,
				hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	/*
	 * cursor
	 * If the cursor plane isn't active, we may not have updated it's ddb
	 * allocation. In that case since the ddb allocation will be updated
	 * once the plane becomes visible, we can skip this check
	 */
	/*
	 * NOTE(review): the 'if (1)' below looks like leftover scaffolding —
	 * the cursor is checked unconditionally despite the comment above.
	 * Presumably safe to flatten to a plain block; confirm against
	 * upstream history before changing.
	 */
	if (1) {
		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

		hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			drm_err(&dev_priv->drm,
				"mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				pipe_name(pipe), level,
				sw_plane_wm->wm[level].plane_en,
				sw_plane_wm->wm[level].plane_res_b,
				sw_plane_wm->wm[level].plane_res_l,
				hw_plane_wm->wm[level].plane_en,
				hw_plane_wm->wm[level].plane_res_b,
				hw_plane_wm->wm[level].plane_res_l);
		}

		/* Transition watermark */
		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			drm_err(&dev_priv->drm,
				"mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				pipe_name(pipe),
				sw_plane_wm->trans_wm.plane_en,
				sw_plane_wm->trans_wm.plane_res_b,
				sw_plane_wm->trans_wm.plane_res_l,
				hw_plane_wm->trans_wm.plane_en,
				hw_plane_wm->trans_wm.plane_res_b,
				hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			drm_err(&dev_priv->drm,
				"mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
				pipe_name(pipe),
				sw_ddb_entry->start, sw_ddb_entry->end,
				hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	kfree(hw);
}
14205
/*
 * Verify the software state of each connector in @state that is assigned
 * to @crtc: the connector's atomic best_encoder must agree with the
 * legacy encoder pointer.
 *
 * @crtc may be NULL (called that way from intel_modeset_verify_disabled());
 * then only connectors with no crtc are checked and no crtc state is
 * looked up.
 */
static void
verify_connector_state(struct intel_atomic_state *state,
                       struct intel_crtc *crtc)
{
        struct drm_connector *connector;
        struct drm_connector_state *new_conn_state;
        int i;

        for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
                struct drm_encoder *encoder = connector->encoder;
                struct intel_crtc_state *crtc_state = NULL;

                /* Skip connectors routed to some other (or no matching) crtc. */
                if (new_conn_state->crtc != &crtc->base)
                        continue;

                if (crtc)
                        crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

                intel_connector_verify_state(crtc_state, new_conn_state);

                I915_STATE_WARN(new_conn_state->best_encoder != encoder,
                     "connector's atomic encoder doesn't match legacy encoder\n");
        }
}
14230
/*
 * Verify encoder software state against the connectors in @state and
 * against the encoder hardware: an encoder must be bound to a crtc iff
 * some connector uses it in the new state, and a detached encoder must
 * be disabled in hardware.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
        struct intel_encoder *encoder;
        struct drm_connector *connector;
        struct drm_connector_state *old_conn_state, *new_conn_state;
        int i;

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                /*
                 * found: the encoder appears in the old or new connector
                 *        state, so the checks below apply to it.
                 * enabled: some connector uses it in the new state.
                 */
                bool enabled = false, found = false;
                enum pipe pipe;

                drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
                            encoder->base.base.id,
                            encoder->base.name);

                for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
                                                   new_conn_state, i) {
                        if (old_conn_state->best_encoder == &encoder->base)
                                found = true;

                        if (new_conn_state->best_encoder != &encoder->base)
                                continue;
                        found = enabled = true;

                        I915_STATE_WARN(new_conn_state->crtc !=
                                        encoder->base.crtc,
                             "connector's crtc doesn't match encoder crtc\n");
                }

                /* Encoder untouched by this commit, nothing to verify. */
                if (!found)
                        continue;

                I915_STATE_WARN(!!encoder->base.crtc != enabled,
                     "encoder's enabled state mismatch "
                     "(expected %i, found %i)\n",
                     !!encoder->base.crtc, enabled);

                if (!encoder->base.crtc) {
                        bool active;

                        /* A detached encoder must also be off in hardware. */
                        active = encoder->get_hw_state(encoder, &pipe);
                        I915_STATE_WARN(active,
                             "encoder detached but still enabled on pipe %c.\n",
                             pipe_name(pipe));
                }
        }
}
14279
/*
 * Read back the full pipe hardware state for @crtc and compare it against
 * the just-committed software state in @new_crtc_state.
 *
 * NOTE: @old_crtc_state is destroyed here and recycled as scratch storage
 * (pipe_config) for the hardware readout; its previous contents are gone
 * after this call, only the uapi.state backpointer is preserved.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
                  struct intel_crtc_state *old_crtc_state,
                  struct intel_crtc_state *new_crtc_state)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        struct intel_crtc_state *pipe_config = old_crtc_state;
        struct drm_atomic_state *state = old_crtc_state->uapi.state;
        bool active;

        /* Wipe the old state so it can hold the hw readout. */
        __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
        intel_crtc_free_hw_state(old_crtc_state);
        intel_crtc_state_reset(old_crtc_state, crtc);
        old_crtc_state->uapi.state = state;

        drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
                    crtc->base.name);

        active = dev_priv->display.get_pipe_config(crtc, pipe_config);

        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv))
                active = new_crtc_state->hw.active;

        I915_STATE_WARN(new_crtc_state->hw.active != active,
                        "crtc active state doesn't match with hw state "
                        "(expected %i, found %i)\n",
                        new_crtc_state->hw.active, active);

        I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
                        "transitional active state does not match atomic hw state "
                        "(expected %i, found %i)\n",
                        new_crtc_state->hw.active, crtc->active);

        for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
                enum pipe pipe;

                active = encoder->get_hw_state(encoder, &pipe);
                I915_STATE_WARN(active != new_crtc_state->hw.active,
                                "[ENCODER:%i] active %i with crtc active %i\n",
                                encoder->base.base.id, active,
                                new_crtc_state->hw.active);

                I915_STATE_WARN(active && crtc->pipe != pipe,
                                "Encoder connected to wrong pipe %c\n",
                                pipe_name(pipe));

                /* Fold each active encoder's config into the hw readout. */
                if (active)
                        encoder->get_config(encoder, pipe_config);
        }

        intel_crtc_compute_pixel_rate(pipe_config);

        /* Inactive pipes have nothing further to compare. */
        if (!new_crtc_state->hw.active)
                return;

        intel_pipe_config_sanity_check(dev_priv, pipe_config);

        if (!intel_pipe_config_compare(new_crtc_state,
                                       pipe_config, false)) {
                I915_STATE_WARN(1, "pipe state doesn't match!\n");
                intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
                intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
        }
}
14347
14348 static void
14349 intel_verify_planes(struct intel_atomic_state *state)
14350 {
14351         struct intel_plane *plane;
14352         const struct intel_plane_state *plane_state;
14353         int i;
14354
14355         for_each_new_intel_plane_in_state(state, plane,
14356                                           plane_state, i)
14357                 assert_plane(plane, plane_state->planar_slave ||
14358                              plane_state->uapi.visible);
14359 }
14360
/*
 * Compare the software tracking of @pll (on/off, active_mask, cached hw
 * state) against the state read back from hardware.
 *
 * @crtc may be NULL: then only the global pll bookkeeping is verified
 * (used for plls that should currently have no users at all), and
 * @new_crtc_state is not dereferenced.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
                         struct intel_shared_dpll *pll,
                         struct intel_crtc *crtc,
                         struct intel_crtc_state *new_crtc_state)
{
        struct intel_dpll_hw_state dpll_hw_state;
        unsigned int crtc_mask;
        bool active;

        memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

        drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

        active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

        /* Always-on plls are never turned off, so skip the on/off checks. */
        if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
                I915_STATE_WARN(!pll->on && pll->active_mask,
                     "pll in active use but not on in sw tracking\n");
                I915_STATE_WARN(pll->on && !pll->active_mask,
                     "pll is on but not used by any active crtc\n");
                I915_STATE_WARN(pll->on != active,
                     "pll on state mismatch (expected %i, found %i)\n",
                     pll->on, active);
        }

        if (!crtc) {
                /* Active users must be a subset of the tracked references. */
                I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
                                "more active pll users than references: %x vs %x\n",
                                pll->active_mask, pll->state.crtc_mask);

                return;
        }

        crtc_mask = drm_crtc_mask(&crtc->base);

        /* An active crtc must be in the pll's active mask, and vice versa. */
        if (new_crtc_state->hw.active)
                I915_STATE_WARN(!(pll->active_mask & crtc_mask),
                                "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
                                pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
        else
                I915_STATE_WARN(pll->active_mask & crtc_mask,
                                "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
                                pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);

        I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
                        "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
                        crtc_mask, pll->state.crtc_mask);

        I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
                                          &dpll_hw_state,
                                          sizeof(dpll_hw_state)),
                        "pll hw state mismatch\n");
}
14415
14416 static void
14417 verify_shared_dpll_state(struct intel_crtc *crtc,
14418                          struct intel_crtc_state *old_crtc_state,
14419                          struct intel_crtc_state *new_crtc_state)
14420 {
14421         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14422
14423         if (new_crtc_state->shared_dpll)
14424                 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
14425
14426         if (old_crtc_state->shared_dpll &&
14427             old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
14428                 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
14429                 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
14430
14431                 I915_STATE_WARN(pll->active_mask & crtc_mask,
14432                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
14433                                 pipe_name(drm_crtc_index(&crtc->base)));
14434                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
14435                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
14436                                 pipe_name(drm_crtc_index(&crtc->base)));
14437         }
14438 }
14439
14440 static void
14441 intel_modeset_verify_crtc(struct intel_crtc *crtc,
14442                           struct intel_atomic_state *state,
14443                           struct intel_crtc_state *old_crtc_state,
14444                           struct intel_crtc_state *new_crtc_state)
14445 {
14446         if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
14447                 return;
14448
14449         verify_wm_state(crtc, new_crtc_state);
14450         verify_connector_state(state, crtc);
14451         verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
14452         verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
14453 }
14454
14455 static void
14456 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
14457 {
14458         int i;
14459
14460         for (i = 0; i < dev_priv->num_shared_dpll; i++)
14461                 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
14462 }
14463
/*
 * Verify the software/hardware state that should remain once the crtcs in
 * @state have been disabled: no encoder still active, no connector still
 * routed to a crtc, and no (non always-on) DPLL left on.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
                              struct intel_atomic_state *state)
{
        verify_encoder_state(dev_priv, state);
        verify_connector_state(state, NULL);
        verify_disabled_dpll_state(dev_priv);
}
14472
/*
 * Update vblank timestamping constants and the scanline counter offset
 * for @crtc_state's adjusted mode. Must be redone whenever the active
 * display timings change; see the long comment below for why the offset
 * is platform and output dependent.
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->hw.adjusted_mode;

        drm_calc_timestamping_constants(&crtc->base, adjusted_mode);

        /*
         * The scanline counter increments at the leading edge of hsync.
         *
         * On most platforms it starts counting from vtotal-1 on the
         * first active line. That means the scanline counter value is
         * always one less than what we would expect. Ie. just after
         * start of vblank, which also occurs at start of hsync (on the
         * last active line), the scanline counter will read vblank_start-1.
         *
         * On gen2 the scanline counter starts counting from 1 instead
         * of vtotal-1, so we have to subtract one (or rather add vtotal-1
         * to keep the value positive), instead of adding one.
         *
         * On HSW+ the behaviour of the scanline counter depends on the output
         * type. For DP ports it behaves like most other platforms, but on HDMI
         * there's an extra 1 line difference. So we need to add two instead of
         * one to the value.
         *
         * On VLV/CHV DSI the scanline counter would appear to increment
         * approx. 1/3 of a scanline before start of vblank. Unfortunately
         * that means we can't tell whether we're in vblank or not while
         * we're on that particular line. We must still set scanline_offset
         * to 1 so that the vblank timestamps come out correct when we query
         * the scanline counter from within the vblank interrupt handler.
         * However if queried just before the start of vblank we'll get an
         * answer that's slightly in the future.
         */
        if (IS_GEN(dev_priv, 2)) {
                int vtotal;

                vtotal = adjusted_mode->crtc_vtotal;
                if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
                        vtotal /= 2;

                crtc->scanline_offset = vtotal - 1;
        } else if (HAS_DDI(dev_priv) &&
                   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
                crtc->scanline_offset = 2;
        } else {
                crtc->scanline_offset = 1;
        }
}
14525
14526 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
14527 {
14528         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14529         struct intel_crtc_state *new_crtc_state;
14530         struct intel_crtc *crtc;
14531         int i;
14532
14533         if (!dev_priv->display.crtc_compute_clock)
14534                 return;
14535
14536         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14537                 if (!needs_modeset(new_crtc_state))
14538                         continue;
14539
14540                 intel_release_shared_dplls(state, crtc);
14541         }
14542 }
14543
14544 /*
14545  * This implements the workaround described in the "notes" section of the mode
14546  * set sequence documentation. When going from no pipes or single pipe to
14547  * multiple pipes, and planes are enabled after the pipe, we need to wait at
14548  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
14549  */
14550 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
14551 {
14552         struct intel_crtc_state *crtc_state;
14553         struct intel_crtc *crtc;
14554         struct intel_crtc_state *first_crtc_state = NULL;
14555         struct intel_crtc_state *other_crtc_state = NULL;
14556         enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
14557         int i;
14558
14559         /* look at all crtc's that are going to be enabled in during modeset */
14560         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14561                 if (!crtc_state->hw.active ||
14562                     !needs_modeset(crtc_state))
14563                         continue;
14564
14565                 if (first_crtc_state) {
14566                         other_crtc_state = crtc_state;
14567                         break;
14568                 } else {
14569                         first_crtc_state = crtc_state;
14570                         first_pipe = crtc->pipe;
14571                 }
14572         }
14573
14574         /* No workaround needed? */
14575         if (!first_crtc_state)
14576                 return 0;
14577
14578         /* w/a possibly needed, check how many crtc's are already enabled. */
14579         for_each_intel_crtc(state->base.dev, crtc) {
14580                 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
14581                 if (IS_ERR(crtc_state))
14582                         return PTR_ERR(crtc_state);
14583
14584                 crtc_state->hsw_workaround_pipe = INVALID_PIPE;
14585
14586                 if (!crtc_state->hw.active ||
14587                     needs_modeset(crtc_state))
14588                         continue;
14589
14590                 /* 2 or more enabled crtcs means no need for w/a */
14591                 if (enabled_pipe != INVALID_PIPE)
14592                         return 0;
14593
14594                 enabled_pipe = crtc->pipe;
14595         }
14596
14597         if (enabled_pipe != INVALID_PIPE)
14598                 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
14599         else if (other_crtc_state)
14600                 other_crtc_state->hsw_workaround_pipe = first_pipe;
14601
14602         return 0;
14603 }
14604
14605 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
14606                            u8 active_pipes)
14607 {
14608         const struct intel_crtc_state *crtc_state;
14609         struct intel_crtc *crtc;
14610         int i;
14611
14612         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14613                 if (crtc_state->hw.active)
14614                         active_pipes |= BIT(crtc->pipe);
14615                 else
14616                         active_pipes &= ~BIT(crtc->pipe);
14617         }
14618
14619         return active_pipes;
14620 }
14621
/*
 * Perform the checks and derived-state computations that are only needed
 * when at least one crtc in @state requires a full modeset.
 */
static int intel_modeset_checks(struct intel_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        int ret;

        state->modeset = true;
        state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes);

        state->active_pipe_changes = state->active_pipes ^ dev_priv->active_pipes;

        /*
         * Changing the set of active pipes affects device-wide resources,
         * so serialize this commit against all others via the global state.
         */
        if (state->active_pipe_changes) {
                ret = _intel_atomic_lock_global_state(state);
                if (ret)
                        return ret;
        }

        ret = intel_modeset_calc_cdclk(state);
        if (ret)
                return ret;

        intel_modeset_clear_plls(state);

        /* See hsw_mode_set_planes_workaround() for the w/a details. */
        if (IS_HASWELL(dev_priv))
                return hsw_mode_set_planes_workaround(state);

        return 0;
}
14649
14650 /*
14651  * Handle calculation of various watermark data at the end of the atomic check
14652  * phase.  The code here should be run after the per-crtc and per-plane 'check'
14653  * handlers to ensure that all derived state has been updated.
14654  */
14655 static int calc_watermark_data(struct intel_atomic_state *state)
14656 {
14657         struct drm_device *dev = state->base.dev;
14658         struct drm_i915_private *dev_priv = to_i915(dev);
14659
14660         /* Is there platform-specific watermark information to calculate? */
14661         if (dev_priv->display.compute_global_watermarks)
14662                 return dev_priv->display.compute_global_watermarks(state);
14663
14664         return 0;
14665 }
14666
14667 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
14668                                      struct intel_crtc_state *new_crtc_state)
14669 {
14670         if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
14671                 return;
14672
14673         new_crtc_state->uapi.mode_changed = false;
14674         new_crtc_state->update_pipe = true;
14675 }
14676
/*
 * Carry over the fuzzy-compared parts of the old crtc state into the new
 * one for a fastset, so values that only differ within the allowed
 * tolerance are not reprogrammed.
 */
static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
                                    struct intel_crtc_state *new_crtc_state)
{
        /*
         * If we're not doing the full modeset we want to
         * keep the current M/N values as they may be
         * sufficiently different to the computed values
         * to cause problems.
         *
         * FIXME: should really copy more fuzzy state here
         */
        new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
        new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
        new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
        new_crtc_state->has_drrs = old_crtc_state->has_drrs;
}
14693
14694 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
14695                                           struct intel_crtc *crtc,
14696                                           u8 plane_ids_mask)
14697 {
14698         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14699         struct intel_plane *plane;
14700
14701         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
14702                 struct intel_plane_state *plane_state;
14703
14704                 if ((plane_ids_mask & BIT(plane->id)) == 0)
14705                         continue;
14706
14707                 plane_state = intel_atomic_get_plane_state(state, plane);
14708                 if (IS_ERR(plane_state))
14709                         return PTR_ERR(plane_state);
14710         }
14711
14712         return 0;
14713 }
14714
14715 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
14716 {
14717         /* See {hsw,vlv,ivb}_plane_ratio() */
14718         return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
14719                 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
14720                 IS_IVYBRIDGE(dev_priv);
14721 }
14722
/*
 * Run the per-plane atomic checks for @state, then make sure any plane
 * whose minimum cdclk requirement may have changed (because the number
 * of active planes on its crtc changed) is part of the state, and
 * finally compute each plane's minimum cdclk. On success
 * *@need_cdclk_calc tells the caller whether a full cdclk
 * recomputation is required.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state,
                                     bool *need_cdclk_calc)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc_state *old_crtc_state, *new_crtc_state;
        struct intel_plane_state *plane_state;
        struct intel_plane *plane;
        struct intel_crtc *crtc;
        int i, ret;

        ret = icl_add_linked_planes(state);
        if (ret)
                return ret;

        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                ret = intel_plane_atomic_check(state, plane);
                if (ret) {
                        drm_dbg_atomic(&dev_priv->drm,
                                       "[PLANE:%d:%s] atomic driver check failed\n",
                                       plane->base.base.id, plane->base.name);
                        return ret;
                }
        }

        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                u8 old_active_planes, new_active_planes;

                ret = icl_check_nv12_planes(new_crtc_state);
                if (ret)
                        return ret;

                /*
                 * On some platforms the number of active planes affects
                 * the planes' minimum cdclk calculation. Add such planes
                 * to the state before we compute the minimum cdclk.
                 */
                if (!active_planes_affects_min_cdclk(dev_priv))
                        continue;

                /* The cursor plane does not count towards the plane ratio. */
                old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
                new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

                if (hweight8(old_active_planes) == hweight8(new_active_planes))
                        continue;

                ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
                if (ret)
                        return ret;
        }

        /*
         * active_planes bitmask has been updated, and potentially
         * affected planes are part of the state. We can now
         * compute the minimum cdclk for each plane.
         */
        for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
                ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
                if (ret)
                        return ret;
        }

        return 0;
}
14787
14788 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
14789 {
14790         struct intel_crtc_state *crtc_state;
14791         struct intel_crtc *crtc;
14792         int i;
14793
14794         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14795                 int ret = intel_crtc_atomic_check(state, crtc);
14796                 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
14797                 if (ret) {
14798                         drm_dbg_atomic(&i915->drm,
14799                                        "[CRTC:%d:%s] atomic driver check failed\n",
14800                                        crtc->base.base.id, crtc->base.name);
14801                         return ret;
14802                 }
14803         }
14804
14805         return 0;
14806 }
14807
14808 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
14809                                                u8 transcoders)
14810 {
14811         const struct intel_crtc_state *new_crtc_state;
14812         struct intel_crtc *crtc;
14813         int i;
14814
14815         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14816                 if (new_crtc_state->hw.enable &&
14817                     transcoders & BIT(new_crtc_state->cpu_transcoder) &&
14818                     needs_modeset(new_crtc_state))
14819                         return true;
14820         }
14821
14822         return false;
14823 }
14824
/*
 * Force a modeset on every crtc currently driving a connector of tile
 * group @tile_grp_id, pulling the affected connector and crtc states into
 * @state, so all tiles of a tiled display are reprogrammed together.
 *
 * Returns 0 on success or a negative error code from the atomic state
 * acquisition helpers.
 */
static int
intel_modeset_all_tiles(struct intel_atomic_state *state, int tile_grp_id)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        int ret = 0;

        drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                struct drm_connector_state *conn_state;
                struct drm_crtc_state *crtc_state;

                /* Only connectors belonging to the requested tile group. */
                if (!connector->has_tile ||
                    connector->tile_group->id != tile_grp_id)
                        continue;
                conn_state = drm_atomic_get_connector_state(&state->base,
                                                            connector);
                if (IS_ERR(conn_state)) {
                        ret =  PTR_ERR(conn_state);
                        break;
                }

                /* Tiled connector currently unused, nothing to modeset. */
                if (!conn_state->crtc)
                        continue;

                crtc_state = drm_atomic_get_crtc_state(&state->base,
                                                       conn_state->crtc);
                if (IS_ERR(crtc_state)) {
                        ret = PTR_ERR(crtc_state);
                        break;
                }
                crtc_state->mode_changed = true;
                ret = drm_atomic_add_affected_connectors(&state->base,
                                                         conn_state->crtc);
                if (ret)
                        break;
        }
        drm_connector_list_iter_end(&conn_iter);

        return ret;
}
14867
14868 static int
14869 intel_atomic_check_tiled_conns(struct intel_atomic_state *state)
14870 {
14871         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14872         struct drm_connector *connector;
14873         struct drm_connector_state *old_conn_state, *new_conn_state;
14874         int i, ret;
14875
14876         if (INTEL_GEN(dev_priv) < 11)
14877                 return 0;
14878
14879         /* Is tiled, mark all other tiled CRTCs as needing a modeset */
14880         for_each_oldnew_connector_in_state(&state->base, connector,
14881                                            old_conn_state, new_conn_state, i) {
14882                 if (!connector->has_tile)
14883                         continue;
14884                 if (!intel_connector_needs_modeset(state, connector))
14885                         continue;
14886
14887                 ret = intel_modeset_all_tiles(state, connector->tile_group->id);
14888                 if (ret)
14889                         return ret;
14890         }
14891
14892         return 0;
14893 }
14894
14895 /**
14896  * intel_atomic_check - validate state object
14897  * @dev: drm device
14898  * @_state: state to validate
14899  */
14900 static int intel_atomic_check(struct drm_device *dev,
14901                               struct drm_atomic_state *_state)
14902 {
14903         struct drm_i915_private *dev_priv = to_i915(dev);
14904         struct intel_atomic_state *state = to_intel_atomic_state(_state);
14905         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14906         struct intel_cdclk_state *new_cdclk_state;
14907         struct intel_crtc *crtc;
14908         int ret, i;
14909         bool any_ms = false;
14910
14911         /* Catch I915_MODE_FLAG_INHERITED */
14912         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14913                                             new_crtc_state, i) {
14914                 if (new_crtc_state->hw.mode.private_flags !=
14915                     old_crtc_state->hw.mode.private_flags)
14916                         new_crtc_state->uapi.mode_changed = true;
14917         }
14918
14919         ret = drm_atomic_helper_check_modeset(dev, &state->base);
14920         if (ret)
14921                 goto fail;
14922
14923         /**
14924          * This check adds all the connectors in current state that belong to
14925          * the same tile group to a full modeset.
14926          * This function directly sets the mode_changed to true and we also call
14927          * drm_atomic_add_affected_connectors(). Hence we are not explicitly
14928          * calling drm_atomic_helper_check_modeset() after this.
14929          *
14930          * Fixme: Handle some corner cases where one of the
14931          * tiled connectors gets disconnected and tile info is lost but since it
14932          * was previously synced to other conn, we need to add that to the modeset.
14933          */
14934         ret = intel_atomic_check_tiled_conns(state);
14935         if (ret)
14936                 goto fail;
14937
14938         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14939                                             new_crtc_state, i) {
14940                 if (!needs_modeset(new_crtc_state)) {
14941                         /* Light copy */
14942                         intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state);
14943
14944                         continue;
14945                 }
14946
14947                 ret = intel_crtc_prepare_cleared_state(new_crtc_state);
14948                 if (ret)
14949                         goto fail;
14950
14951                 if (!new_crtc_state->hw.enable)
14952                         continue;
14953
14954                 ret = intel_modeset_pipe_config(new_crtc_state);
14955                 if (ret)
14956                         goto fail;
14957
14958                 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
14959         }
14960
14961         /**
14962          * Check if fastset is allowed by external dependencies like other
14963          * pipes and transcoders.
14964          *
14965          * Right now it only forces a fullmodeset when the MST master
14966          * transcoder did not changed but the pipe of the master transcoder
14967          * needs a fullmodeset so all slaves also needs to do a fullmodeset or
14968          * in case of port synced crtcs, if one of the synced crtcs
14969          * needs a full modeset, all other synced crtcs should be
14970          * forced a full modeset.
14971          */
14972         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14973                 if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state))
14974                         continue;
14975
14976                 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
14977                         enum transcoder master = new_crtc_state->mst_master_transcoder;
14978
14979                         if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
14980                                 new_crtc_state->uapi.mode_changed = true;
14981                                 new_crtc_state->update_pipe = false;
14982                         }
14983                 }
14984
14985                 if (is_trans_port_sync_mode(new_crtc_state)) {
14986                         u8 trans = new_crtc_state->sync_mode_slaves_mask;
14987
14988                         if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
14989                                 trans |= BIT(new_crtc_state->master_transcoder);
14990
14991                         if (intel_cpu_transcoders_need_modeset(state, trans)) {
14992                                 new_crtc_state->uapi.mode_changed = true;
14993                                 new_crtc_state->update_pipe = false;
14994                         }
14995                 }
14996         }
14997
14998         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14999                                             new_crtc_state, i) {
15000                 if (needs_modeset(new_crtc_state)) {
15001                         any_ms = true;
15002                         continue;
15003                 }
15004
15005                 if (!new_crtc_state->update_pipe)
15006                         continue;
15007
15008                 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
15009         }
15010
15011         if (any_ms && !check_digital_port_conflicts(state)) {
15012                 drm_dbg_kms(&dev_priv->drm,
15013                             "rejecting conflicting digital port configuration\n");
15014                 ret = EINVAL;
15015                 goto fail;
15016         }
15017
15018         ret = drm_dp_mst_atomic_check(&state->base);
15019         if (ret)
15020                 goto fail;
15021
15022         ret = intel_atomic_check_planes(state, &any_ms);
15023         if (ret)
15024                 goto fail;
15025
15026         new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
15027         if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed)
15028                 any_ms = true;
15029
15030         /*
15031          * distrust_bios_wm will force a full dbuf recomputation
15032          * but the hardware state will only get updated accordingly
15033          * if state->modeset==true. Hence distrust_bios_wm==true &&
15034          * state->modeset==false is an invalid combination which
15035          * would cause the hardware and software dbuf state to get
15036          * out of sync. We must prevent that.
15037          *
15038          * FIXME clean up this mess and introduce better
15039          * state tracking for dbuf.
15040          */
15041         if (dev_priv->wm.distrust_bios_wm)
15042                 any_ms = true;
15043
15044         if (any_ms) {
15045                 ret = intel_modeset_checks(state);
15046                 if (ret)
15047                         goto fail;
15048         }
15049
15050         ret = intel_atomic_check_crtcs(state);
15051         if (ret)
15052                 goto fail;
15053
15054         intel_fbc_choose_crtc(dev_priv, state);
15055         ret = calc_watermark_data(state);
15056         if (ret)
15057                 goto fail;
15058
15059         ret = intel_bw_atomic_check(state);
15060         if (ret)
15061                 goto fail;
15062
15063         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
15064                                             new_crtc_state, i) {
15065                 if (!needs_modeset(new_crtc_state) &&
15066                     !new_crtc_state->update_pipe)
15067                         continue;
15068
15069                 intel_dump_pipe_config(new_crtc_state, state,
15070                                        needs_modeset(new_crtc_state) ?
15071                                        "[modeset]" : "[fastset]");
15072         }
15073
15074         return 0;
15075
15076  fail:
15077         if (ret == -EDEADLK)
15078                 return ret;
15079
15080         /*
15081          * FIXME would probably be nice to know which crtc specifically
15082          * caused the failure, in cases where we can pinpoint it.
15083          */
15084         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
15085                                             new_crtc_state, i)
15086                 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
15087
15088         return ret;
15089 }
15090
15091 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
15092 {
15093         return drm_atomic_helper_prepare_planes(state->base.dev,
15094                                                 &state->base);
15095 }
15096
15097 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
15098 {
15099         struct drm_device *dev = crtc->base.dev;
15100         struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
15101
15102         if (!vblank->max_vblank_count)
15103                 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
15104
15105         return crtc->base.funcs->get_vblank_counter(&crtc->base);
15106 }
15107
15108 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
15109                                   struct intel_crtc_state *crtc_state)
15110 {
15111         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
15112
15113         if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
15114                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
15115
15116         if (crtc_state->has_pch_encoder) {
15117                 enum pipe pch_transcoder =
15118                         intel_crtc_pch_transcoder(crtc);
15119
15120                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
15121         }
15122 }
15123
/*
 * Apply pipe-level hardware state that is allowed to change during a
 * fastset (i.e. without a full modeset): pipe source size, panel
 * fitter, linetime watermarks and the gen11+ pipe chicken bits.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
                               const struct intel_crtc_state *new_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        /*
         * Update pipe size and adjust fitter if needed: the reason for this is
         * that in compute_mode_changes we check the native mode (not the pfit
         * mode) to see if we can flip rather than do a full mode set. In the
         * fastboot case, we'll flip, but if we don't update the pipesrc and
         * pfit state, we'll end up with a big fb scanned out into the wrong
         * sized surface.
         */
        intel_set_pipe_src_size(new_crtc_state);

        /* on skylake this is done by detaching scalers */
        if (INTEL_GEN(dev_priv) >= 9) {
                skl_detach_scalers(new_crtc_state);

                if (new_crtc_state->pch_pfit.enabled)
                        skl_pfit_enable(new_crtc_state);
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                /* ilk-style pfit: enable, or disable if it was on before */
                if (new_crtc_state->pch_pfit.enabled)
                        ilk_pfit_enable(new_crtc_state);
                else if (old_crtc_state->pch_pfit.enabled)
                        ilk_pfit_disable(old_crtc_state);
        }

        /*
         * The register is supposedly single buffered so perhaps
         * not 100% correct to do this here. But SKL+ calculate
         * this based on the adjust pixel rate so pfit changes do
         * affect it and so it must be updated for fastsets.
         * HSW/BDW only really need this here for fastboot, after
         * that the value should not change without a full modeset.
         */
        if (INTEL_GEN(dev_priv) >= 9 ||
            IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
                hsw_set_linetime_wm(new_crtc_state);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_set_pipe_chicken(crtc);
}
15168
/*
 * Program pipe-level configuration during the commit phase. For full
 * modesets the pipe was already configured when the CRTC was enabled,
 * so registers are only touched here on the non-modeset (fastset)
 * path; watermarks are updated in both cases.
 */
static void commit_pipe_config(struct intel_atomic_state *state,
                               struct intel_crtc_state *old_crtc_state,
                               struct intel_crtc_state *new_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        bool modeset = needs_modeset(new_crtc_state);

        /*
         * During modesets pipe configuration was programmed as the
         * CRTC was enabled.
         */
        if (!modeset) {
                if (new_crtc_state->uapi.color_mgmt_changed ||
                    new_crtc_state->update_pipe)
                        intel_color_commit(new_crtc_state);

                if (INTEL_GEN(dev_priv) >= 9)
                        skl_detach_scalers(new_crtc_state);

                if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
                        bdw_set_pipemisc(new_crtc_state);

                if (new_crtc_state->update_pipe)
                        intel_pipe_fastset(old_crtc_state, new_crtc_state);
        }

        /* Watermarks are updated for modesets and fastsets alike. */
        if (dev_priv->display.atomic_update_watermarks)
                dev_priv->display.atomic_update_watermarks(state, crtc);
}
15199
/*
 * Commit one CRTC: full enable for modesets, otherwise the fastset /
 * plane-only update path. Pipe and plane register writes are wrapped
 * in the vblank evasion critical section
 * (intel_pipe_update_start()/intel_pipe_update_end()).
 */
static void intel_update_crtc(struct intel_crtc *crtc,
                              struct intel_atomic_state *state,
                              struct intel_crtc_state *old_crtc_state,
                              struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        bool modeset = needs_modeset(new_crtc_state);

        if (modeset) {
                intel_crtc_update_active_timings(new_crtc_state);

                dev_priv->display.crtc_enable(state, crtc);

                /* vblanks work again, re-enable pipe CRC. */
                intel_crtc_enable_pipe_crc(crtc);
        } else {
                /* preload LUTs ahead of the vblank-evasion window */
                if (new_crtc_state->preload_luts &&
                    (new_crtc_state->uapi.color_mgmt_changed ||
                     new_crtc_state->update_pipe))
                        intel_color_load_luts(new_crtc_state);

                intel_pre_plane_update(state, crtc);

                if (new_crtc_state->update_pipe)
                        intel_encoders_update_pipe(state, crtc);
        }

        if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
                intel_fbc_disable(crtc);
        else
                intel_fbc_enable(state, crtc);

        /* Perform vblank evasion around commit operation */
        intel_pipe_update_start(new_crtc_state);

        commit_pipe_config(state, old_crtc_state, new_crtc_state);

        if (INTEL_GEN(dev_priv) >= 9)
                skl_update_planes_on_crtc(state, crtc);
        else
                i9xx_update_planes_on_crtc(state, crtc);

        intel_pipe_update_end(new_crtc_state);

        /*
         * We usually enable FIFO underrun interrupts as part of the
         * CRTC enable sequence during modesets.  But when we inherit a
         * valid pipe configuration from the BIOS we need to take care
         * of enabling them on the CRTC's first fastset.
         */
        if (new_crtc_state->update_pipe && !modeset &&
            old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
                intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
15254
15255 static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state)
15256 {
15257         struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
15258         enum transcoder slave_transcoder;
15259
15260         WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
15261
15262         slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
15263         return intel_get_crtc_for_pipe(dev_priv,
15264                                        (enum pipe)slave_transcoder);
15265 }
15266
/*
 * Fully disable a CRTC that is being turned off (or re-modeset):
 * planes, pipe CRC, the pipe itself, FBC and its shared DPLL.
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
                                          struct intel_crtc_state *old_crtc_state,
                                          struct intel_crtc_state *new_crtc_state,
                                          struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);

        intel_crtc_disable_planes(state, crtc);

        /*
         * We need to disable pipe CRC before disabling the pipe,
         * or we race against vblank off.
         */
        intel_crtc_disable_pipe_crc(crtc);

        dev_priv->display.crtc_disable(state, crtc);
        crtc->active = false;
        intel_fbc_disable(crtc);
        intel_disable_shared_dpll(old_crtc_state);

        /* FIXME unify this for all platforms */
        if (!new_crtc_state->hw.active &&
            !HAS_GMCH(dev_priv) &&
            dev_priv->display.initial_watermarks)
                dev_priv->display.initial_watermarks(state, crtc);
}
15293
/*
 * Disable all CRTCs that need a full modeset, in two passes: port
 * sync / MST slaves first (tracked in @handled), then everything
 * else, since slaves must go down before their masters.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
        struct intel_crtc_state *new_crtc_state, *old_crtc_state;
        struct intel_crtc *crtc;
        u32 handled = 0;        /* bitmask of pipes disabled in pass 1 */
        int i;

        /* Only disable port sync and MST slaves */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                if (!needs_modeset(new_crtc_state))
                        continue;

                if (!old_crtc_state->hw.active)
                        continue;

                /* In case of Transcoder port Sync master slave CRTCs can be
                 * assigned in any order and we need to make sure that
                 * slave CRTCs are disabled first and then master CRTC since
                 * Slave vblanks are masked till Master Vblanks.
                 */
                if (!is_trans_port_sync_slave(old_crtc_state) &&
                    !intel_dp_mst_is_slave_trans(old_crtc_state))
                        continue;

                intel_pre_plane_update(state, crtc);
                intel_old_crtc_state_disables(state, old_crtc_state,
                                              new_crtc_state, crtc);
                handled |= BIT(crtc->pipe);
        }

        /* Disable everything else left on */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                if (!needs_modeset(new_crtc_state) ||
                    (handled & BIT(crtc->pipe)))
                        continue;

                intel_pre_plane_update(state, crtc);
                if (old_crtc_state->hw.active)
                        intel_old_crtc_state_disables(state, old_crtc_state,
                                                      new_crtc_state, crtc);
        }
}
15338
15339 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
15340 {
15341         struct intel_crtc *crtc;
15342         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
15343         int i;
15344
15345         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
15346                 if (!new_crtc_state->hw.active)
15347                         continue;
15348
15349                 intel_update_crtc(crtc, state, old_crtc_state,
15350                                   new_crtc_state);
15351         }
15352 }
15353
/*
 * Enable a CRTC as part of the transcoder port sync sequence:
 * active timings first, then the full CRTC enable, then pipe CRC
 * once vblanks are running again.
 */
static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc,
                                              struct intel_atomic_state *state,
                                              struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);

        intel_crtc_update_active_timings(new_crtc_state);
        dev_priv->display.crtc_enable(state, crtc);
        intel_crtc_enable_pipe_crc(crtc);
}
15364
15365 static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
15366                                        struct intel_atomic_state *state)
15367 {
15368         struct drm_connector *uninitialized_var(conn);
15369         struct drm_connector_state *conn_state;
15370         struct intel_dp *intel_dp;
15371         int i;
15372
15373         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
15374                 if (conn_state->crtc == &crtc->base)
15375                         break;
15376         }
15377         intel_dp = intel_attached_dp(to_intel_connector(conn));
15378         intel_dp_stop_link_train(intel_dp);
15379 }
15380
15381 /*
15382  * TODO: This is only called from port sync and it is identical to what will be
15383  * executed again in intel_update_crtc() over port sync pipes
15384  */
15385 static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
15386                                            struct intel_atomic_state *state)
15387 {
15388         struct intel_crtc_state *new_crtc_state =
15389                 intel_atomic_get_new_crtc_state(state, crtc);
15390         struct intel_crtc_state *old_crtc_state =
15391                 intel_atomic_get_old_crtc_state(state, crtc);
15392         bool modeset = needs_modeset(new_crtc_state);
15393
15394         if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
15395                 intel_fbc_disable(crtc);
15396         else
15397                 intel_fbc_enable(state, crtc);
15398
15399         /* Perform vblank evasion around commit operation */
15400         intel_pipe_update_start(new_crtc_state);
15401         commit_pipe_config(state, old_crtc_state, new_crtc_state);
15402         skl_update_planes_on_crtc(state, crtc);
15403         intel_pipe_update_end(new_crtc_state);
15404
15405         /*
15406          * We usually enable FIFO underrun interrupts as part of the
15407          * CRTC enable sequence during modesets.  But when we inherit a
15408          * valid pipe configuration from the BIOS we need to take care
15409          * of enabling them on the CRTC's first fastset.
15410          */
15411         if (new_crtc_state->update_pipe && !modeset &&
15412             old_crtc_state->hw.mode.private_flags & I915_MODE_FLAG_INHERITED)
15413                 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
15414 }
15415
/*
 * Enable a transcoder port sync master/slave pair. The slave is
 * brought up first (slave vblanks are masked until the master runs)
 * and DP_TP_CTL is moved from Idle to Normal slave-first as well,
 * with a short delay before the master.
 */
static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
                                               struct intel_atomic_state *state,
                                               struct intel_crtc_state *old_crtc_state,
                                               struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *i915 = to_i915(crtc->base.dev);
        struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
        struct intel_crtc_state *new_slave_crtc_state =
                intel_atomic_get_new_crtc_state(state, slave_crtc);
        struct intel_crtc_state *old_slave_crtc_state =
                intel_atomic_get_old_crtc_state(state, slave_crtc);

        WARN_ON(!slave_crtc || !new_slave_crtc_state ||
                !old_slave_crtc_state);

        drm_dbg_kms(&i915->drm,
                    "Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
                    crtc->base.base.id, crtc->base.name,
                    slave_crtc->base.base.id, slave_crtc->base.name);

        /* Enable seq for slave with DP_TP_CTL left Idle until the
         * master is ready
         */
        intel_crtc_enable_trans_port_sync(slave_crtc,
                                          state,
                                          new_slave_crtc_state);

        /* Enable seq for master with DP_TP_CTL left Idle */
        intel_crtc_enable_trans_port_sync(crtc,
                                          state,
                                          new_crtc_state);

        /* Set Slave's DP_TP_CTL to Normal */
        intel_set_dp_tp_ctl_normal(slave_crtc,
                                   state);

        /* Set Master's DP_TP_CTL To Normal */
        usleep_range(200, 400);
        intel_set_dp_tp_ctl_normal(crtc,
                                   state);

        /* Now do the post crtc enable for all master and slaves */
        intel_post_crtc_enable_updates(slave_crtc,
                                       state);
        intel_post_crtc_enable_updates(crtc,
                                       state);
}
15463
15464 static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state)
15465 {
15466         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
15467         u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
15468         u8 required_slices = state->enabled_dbuf_slices_mask;
15469         u8 slices_union = hw_enabled_slices | required_slices;
15470
15471         /* If 2nd DBuf slice required, enable it here */
15472         if (INTEL_GEN(dev_priv) >= 11 && slices_union != hw_enabled_slices)
15473                 icl_dbuf_slices_update(dev_priv, slices_union);
15474 }
15475
15476 static void icl_dbuf_slice_post_update(struct intel_atomic_state *state)
15477 {
15478         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
15479         u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
15480         u8 required_slices = state->enabled_dbuf_slices_mask;
15481
15482         /* If 2nd DBuf slice is no more required disable it */
15483         if (INTEL_GEN(dev_priv) >= 11 && required_slices != hw_enabled_slices)
15484                 icl_dbuf_slices_update(dev_priv, required_slices);
15485 }
15486
/*
 * skl+ CRTC enable ordering: DDB (dbuf) allocations of active pipes
 * must never overlap between consecutive updates, so pipes are
 * committed in dependency order — fastsets first (retrying until each
 * pipe's new allocation no longer overlaps), then independent
 * modesets (port sync master+slave enabled together), and finally the
 * remaining dependent pipes (MST slaves).
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc *crtc;
        struct intel_crtc_state *old_crtc_state, *new_crtc_state;
        struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
        const u8 num_pipes = INTEL_NUM_PIPES(dev_priv);
        u8 update_pipes = 0, modeset_pipes = 0;
        int i;

        /* Seed entries[] with the pre-commit DDB of non-modeset pipes. */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                enum pipe pipe = crtc->pipe;

                if (!new_crtc_state->hw.active)
                        continue;

                /* ignore allocations for crtc's that have been turned off. */
                if (!needs_modeset(new_crtc_state)) {
                        entries[pipe] = old_crtc_state->wm.skl.ddb;
                        update_pipes |= BIT(pipe);
                } else {
                        modeset_pipes |= BIT(pipe);
                }
        }

        /*
         * Whenever the number of active pipes changes, we need to make sure we
         * update the pipes in the right order so that their ddb allocations
         * never overlap with each other between CRTC updates. Otherwise we'll
         * cause pipe underruns and other bad stuff.
         *
         * So first lets enable all pipes that do not need a fullmodeset as
         * those don't have any external dependency.
         */
        while (update_pipes) {
                for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                                    new_crtc_state, i) {
                        enum pipe pipe = crtc->pipe;

                        if ((update_pipes & BIT(pipe)) == 0)
                                continue;

                        /* defer this pipe until the conflicting pipe moved */
                        if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
                                                        entries, num_pipes, pipe))
                                continue;

                        entries[pipe] = new_crtc_state->wm.skl.ddb;
                        update_pipes &= ~BIT(pipe);

                        intel_update_crtc(crtc, state, old_crtc_state,
                                          new_crtc_state);

                        /*
                         * If this is an already active pipe, it's DDB changed,
                         * and this isn't the last pipe that needs updating
                         * then we need to wait for a vblank to pass for the
                         * new ddb allocation to take effect.
                         */
                        if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
                                                 &old_crtc_state->wm.skl.ddb) &&
                            (update_pipes | modeset_pipes))
                                intel_wait_for_vblank(dev_priv, pipe);
                }
        }

        /*
         * Enable all pipes that needs a modeset and do not depends on other
         * pipes
         */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                enum pipe pipe = crtc->pipe;

                if ((modeset_pipes & BIT(pipe)) == 0)
                        continue;

                /* dependent pipes are handled in the last loop below */
                if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
                    is_trans_port_sync_slave(new_crtc_state))
                        continue;

                WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
                                                    entries, num_pipes, pipe));

                entries[pipe] = new_crtc_state->wm.skl.ddb;
                modeset_pipes &= ~BIT(pipe);

                if (is_trans_port_sync_mode(new_crtc_state)) {
                        struct intel_crtc *slave_crtc;

                        intel_update_trans_port_sync_crtcs(crtc, state,
                                                           old_crtc_state,
                                                           new_crtc_state);

                        slave_crtc = intel_get_slave_crtc(new_crtc_state);
                        /* TODO: update entries[] of slave */
                        modeset_pipes &= ~BIT(slave_crtc->pipe);

                } else {
                        intel_update_crtc(crtc, state, old_crtc_state,
                                          new_crtc_state);
                }
        }

        /*
         * Finally enable all pipes that needs a modeset and depends on
         * other pipes, right now it is only MST slaves as both port sync slave
         * and master are enabled together
         */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                enum pipe pipe = crtc->pipe;

                if ((modeset_pipes & BIT(pipe)) == 0)
                        continue;

                WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
                                                    entries, num_pipes, pipe));

                entries[pipe] = new_crtc_state->wm.skl.ddb;
                modeset_pipes &= ~BIT(pipe);

                intel_update_crtc(crtc, state, old_crtc_state, new_crtc_state);
        }

        /* Every modeset pipe must have been handled by now. */
        WARN_ON(modeset_pipes);

}
15614
15615 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
15616 {
15617         struct intel_atomic_state *state, *next;
15618         struct llist_node *freed;
15619
15620         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
15621         llist_for_each_entry_safe(state, next, freed, freed)
15622                 drm_atomic_state_put(&state->base);
15623 }
15624
15625 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
15626 {
15627         struct drm_i915_private *dev_priv =
15628                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
15629
15630         intel_atomic_helper_free_state(dev_priv);
15631 }
15632
/*
 * Sleep until the commit's i915_sw_fence signals. The wait is armed
 * on two queues at once so it also wakes when an I915_RESET_MODESET
 * GPU reset is flagged, rather than sleeping across a display reset.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
        struct wait_queue_entry wait_fence, wait_reset;
        struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

        init_wait_entry(&wait_fence, 0);
        init_wait_entry(&wait_reset, 0);
        for (;;) {
                /* (re)arm both wakeup sources before checking the conditions */
                prepare_to_wait(&intel_state->commit_ready.wait,
                                &wait_fence, TASK_UNINTERRUPTIBLE);
                prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
                                              I915_RESET_MODESET),
                                &wait_reset, TASK_UNINTERRUPTIBLE);


                if (i915_sw_fence_done(&intel_state->commit_ready) ||
                    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
                        break;

                schedule();
        }
        finish_wait(&intel_state->commit_ready.wait, &wait_fence);
        finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
                                  I915_RESET_MODESET),
                    &wait_reset);
}
15659
15660 static void intel_atomic_cleanup_work(struct work_struct *work)
15661 {
15662         struct drm_atomic_state *state =
15663                 container_of(work, struct drm_atomic_state, commit_work);
15664         struct drm_i915_private *i915 = to_i915(state->dev);
15665
15666         drm_atomic_helper_cleanup_planes(&i915->drm, state);
15667         drm_atomic_helper_commit_cleanup_done(state);
15668         drm_atomic_state_put(state);
15669
15670         intel_atomic_helper_free_state(i915);
15671 }
15672
/*
 * Second half of an atomic commit: program the already swapped-in state to
 * the hardware. Runs either inline (blocking commits) or from a worker
 * (nonblocking commits), strictly after intel_atomic_commit_fence_wait().
 * The ordering of the steps below (disables, cdclk, dbuf slices, enables,
 * flip-done wait, watermark optimization, cleanup) is load-bearing.
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	/* Wait for fences, aborting early if a modeset GPU reset is pending. */
	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);

	if (state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	/*
	 * Grab the power domains each modeset/fastset pipe needs up front;
	 * they are put again further down once the pipe is reprogrammed.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (needs_modeset(new_crtc_state) ||
		    new_crtc_state->update_pipe) {

			put_domains[crtc->pipe] =
				modeset_get_crtc_power_domains(new_crtc_state);
		}
	}

	intel_commit_modeset_disables(state);

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		/* Raise cdclk before enabling pipes, if it needs to grow. */
		intel_set_cdclk_pre_plane_update(state);

		/*
		 * SKL workaround: bspec recommends we disable the SAGV when we
		 * have more then one pipe enabled
		 */
		if (!intel_can_enable_sagv(state))
			intel_disable_sagv(dev_priv);

		intel_modeset_verify_disabled(dev_priv, state);
	}

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = needs_modeset(new_crtc_state);

		/* Complete events for now disable pipes here. */
		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base,
						   new_crtc_state->uapi.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->uapi.event = NULL;
		}
	}

	if (state->modeset)
		intel_encoders_update_prepare(state);

	/* Enable all new slices, we might need */
	if (state->modeset)
		icl_dbuf_slice_pre_update(state);

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.commit_modeset_enables(state);

	if (state->modeset) {
		intel_encoders_update_complete(state);

		/* Lower cdclk again if the new state allows it. */
		intel_set_cdclk_post_plane_update(state);
	}

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchrously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need out special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	/* LUTs that were deliberately not preloaded get programmed now. */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->hw.active &&
		    !needs_modeset(new_crtc_state) &&
		    !new_crtc_state->preload_luts &&
		    (new_crtc_state->uapi.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So re-enable underrun reporting after some planes get enabled.
		 *
		 * We do this before .optimize_watermarks() so that we have a
		 * chance of catching underruns with the intermediate watermarks
		 * vs. the new plane configuration.
		 */
		if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(state, crtc);
	}

	/* Disable all slices, we don't need */
	if (state->modeset)
		icl_dbuf_slice_post_update(state);

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		/* Release the power domains acquired at the top of this function. */
		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
	}

	/* Underruns don't always raise interrupts, so check manually */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);

	if (state->modeset)
		intel_verify_planes(state);

	if (state->modeset && intel_can_enable_sagv(state))
		intel_enable_sagv(dev_priv);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->base.commit_work);
}
15844
15845 static void intel_atomic_commit_work(struct work_struct *work)
15846 {
15847         struct intel_atomic_state *state =
15848                 container_of(work, struct intel_atomic_state, base.commit_work);
15849
15850         intel_atomic_commit_tail(state);
15851 }
15852
15853 static int __i915_sw_fence_call
15854 intel_atomic_commit_ready(struct i915_sw_fence *fence,
15855                           enum i915_sw_fence_notify notify)
15856 {
15857         struct intel_atomic_state *state =
15858                 container_of(fence, struct intel_atomic_state, commit_ready);
15859
15860         switch (notify) {
15861         case FENCE_COMPLETE:
15862                 /* we do blocking waits in the worker, nothing to do here */
15863                 break;
15864         case FENCE_FREE:
15865                 {
15866                         struct intel_atomic_helper *helper =
15867                                 &to_i915(state->base.dev)->atomic_helper;
15868
15869                         if (llist_add(&state->freed, &helper->free_list))
15870                                 schedule_work(&helper->free_work);
15871                         break;
15872                 }
15873         }
15874
15875         return NOTIFY_DONE;
15876 }
15877
/*
 * Move the frontbuffer tracking bits from each plane's old framebuffer to
 * its new one, so frontbuffer invalidate/flush events hit the right
 * buffers after the state swap.
 */
static void intel_atomic_track_fbs(struct intel_atomic_state *state)
{
	struct intel_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *plane;
	int i;

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i)
		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
					to_intel_frontbuffer(new_plane_state->hw.fb),
					plane->frontbuffer_bit);
}
15890
/*
 * Assert that every crtc's modeset lock is held; changing global state
 * (e.g. dev_priv->active_pipes) requires holding all of them.
 */
static void assert_global_state_locked(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc)
		drm_modeset_lock_assert_held(&crtc->base.mutex);
}
15898
/*
 * Main atomic commit entry point (drm_mode_config_funcs.atomic_commit).
 * Prepares the commit, swaps in the new state, and then either runs
 * intel_atomic_commit_tail() inline (blocking) or queues it on a
 * workqueue (nonblocking). Returns 0 on success or a negative errno.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	/* Released by intel_atomic_commit_tail() (or on the error paths below). */
	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);
	if (!ret)
		intel_atomic_swap_global_state(state);

	if (ret) {
		/* Setup/swap failed: release the fence and undo the preparation. */
		i915_sw_fence_commit(&state->commit_ready);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	dev_priv->wm.distrust_bios_wm = false;
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	/* Updating active_pipes requires all crtc modeset locks held. */
	if (state->global_state_changed) {
		assert_global_state_locked(dev_priv);

		dev_priv->active_pipes = state->active_pipes;
	}

	/* Extra reference for the commit tail; dropped in intel_atomic_cleanup_work(). */
	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/* Blocking modeset: don't overtake queued nonblocking modesets. */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
15989
/*
 * One-shot vblank waitqueue entry used to boost the GPU frequency if the
 * request a flip depends on has not started by the time the vblank fires.
 * Allocated in add_rps_boost_after_vblank(), freed in do_rps_boost().
 */
struct wait_rps_boost {
	struct wait_queue_entry wait;	/* entry on the crtc's vblank waitqueue */

	struct drm_crtc *crtc;		/* crtc whose vblank reference we hold */
	struct i915_request *request;	/* request to boost; we hold a reference */
};
15996
/*
 * Vblank waitqueue callback: boost the GPU frequency for the request the
 * flip depends on, unless it has already started executing. Drops the
 * request and vblank references taken in add_rps_boost_after_vblank(),
 * removes itself from the waitqueue and frees the wait entry.
 */
static int do_rps_boost(struct wait_queue_entry *_wait,
			unsigned mode, int sync, void *key)
{
	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
	struct i915_request *rq = wait->request;

	/*
	 * If we missed the vblank, but the request is already running it
	 * is reasonable to assume that it will complete before the next
	 * vblank without our intervention, so leave RPS alone.
	 */
	if (!i915_request_started(rq))
		intel_rps_boost(rq);
	i915_request_put(rq);

	drm_crtc_vblank_put(wait->crtc);

	/* One-shot: remove ourselves before freeing the containing struct. */
	list_del(&wait->wait.entry);
	kfree(wait);
	return 1;
}
16018
16019 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
16020                                        struct dma_fence *fence)
16021 {
16022         struct wait_rps_boost *wait;
16023
16024         if (!dma_fence_is_i915(fence))
16025                 return;
16026
16027         if (INTEL_GEN(to_i915(crtc->dev)) < 6)
16028                 return;
16029
16030         if (drm_crtc_vblank_get(crtc))
16031                 return;
16032
16033         wait = kmalloc(sizeof(*wait), GFP_KERNEL);
16034         if (!wait) {
16035                 drm_crtc_vblank_put(crtc);
16036                 return;
16037         }
16038
16039         wait->request = to_request(dma_fence_get(fence));
16040         wait->crtc = crtc;
16041
16042         wait->wait.func = do_rps_boost;
16043         wait->wait.flags = 0;
16044
16045         add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
16046 }
16047
/*
 * Pin (and fence, if applicable) the framebuffer object backing a plane
 * state. Cursor planes on platforms that need physically addressed
 * cursors first get their object attached to a physical allocation.
 * On success the resulting vma is stored in plane_state->vma for the
 * matching intel_plane_unpin_fb(). Returns 0 or a negative errno.
 */
static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct i915_vma *vma;

	if (plane->id == PLANE_CURSOR &&
	    INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
		const int align = intel_cursor_alignment(dev_priv);
		int err;

		err = i915_gem_object_attach_phys(obj, align);
		if (err)
			return err;
	}

	vma = intel_pin_and_fence_fb_obj(fb,
					 &plane_state->view,
					 intel_plane_uses_fence(plane_state),
					 &plane_state->flags);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	plane_state->vma = vma;

	return 0;
}
16077
16078 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
16079 {
16080         struct i915_vma *vma;
16081
16082         vma = fetch_and_zero(&old_plane_state->vma);
16083         if (vma)
16084                 intel_unpin_fb_vma(vma, old_plane_state->flags);
16085 }
16086
/*
 * Raise the scheduling priority of any rendering still pending on @obj,
 * so work the display is about to wait on runs ahead of normal clients.
 */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
	struct i915_sched_attr attr = {
		.priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
	};

	i915_gem_object_wait_priority(obj, 0, &attr);
}
16095
16096 /**
16097  * intel_prepare_plane_fb - Prepare fb for usage on plane
16098  * @_plane: drm plane to prepare for
16099  * @_new_plane_state: the plane state being prepared
16100  *
16101  * Prepares a framebuffer for usage on a display plane.  Generally this
16102  * involves pinning the underlying object and updating the frontbuffer tracking
16103  * bits.  Some older platforms need special physical address handling for
16104  * cursor planes.
16105  *
16106  * Returns 0 on success, negative error code on failure.
16107  */
16108 int
16109 intel_prepare_plane_fb(struct drm_plane *_plane,
16110                        struct drm_plane_state *_new_plane_state)
16111 {
16112         struct intel_plane *plane = to_intel_plane(_plane);
16113         struct intel_plane_state *new_plane_state =
16114                 to_intel_plane_state(_new_plane_state);
16115         struct intel_atomic_state *state =
16116                 to_intel_atomic_state(new_plane_state->uapi.state);
16117         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
16118         const struct intel_plane_state *old_plane_state =
16119                 intel_atomic_get_old_plane_state(state, plane);
16120         struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
16121         struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
16122         int ret;
16123
16124         if (old_obj) {
16125                 const struct intel_crtc_state *crtc_state =
16126                         intel_atomic_get_new_crtc_state(state,
16127                                                         to_intel_crtc(old_plane_state->hw.crtc));
16128
16129                 /* Big Hammer, we also need to ensure that any pending
16130                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
16131                  * current scanout is retired before unpinning the old
16132                  * framebuffer. Note that we rely on userspace rendering
16133                  * into the buffer attached to the pipe they are waiting
16134                  * on. If not, userspace generates a GPU hang with IPEHR
16135                  * point to the MI_WAIT_FOR_EVENT.
16136                  *
16137                  * This should only fail upon a hung GPU, in which case we
16138                  * can safely continue.
16139                  */
16140                 if (needs_modeset(crtc_state)) {
16141                         ret = i915_sw_fence_await_reservation(&state->commit_ready,
16142                                                               old_obj->base.resv, NULL,
16143                                                               false, 0,
16144                                                               GFP_KERNEL);
16145                         if (ret < 0)
16146                                 return ret;
16147                 }
16148         }
16149
16150         if (new_plane_state->uapi.fence) { /* explicit fencing */
16151                 ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
16152                                                     new_plane_state->uapi.fence,
16153                                                     I915_FENCE_TIMEOUT,
16154                                                     GFP_KERNEL);
16155                 if (ret < 0)
16156                         return ret;
16157         }
16158
16159         if (!obj)
16160                 return 0;
16161
16162         ret = i915_gem_object_pin_pages(obj);
16163         if (ret)
16164                 return ret;
16165
16166         ret = intel_plane_pin_fb(new_plane_state);
16167
16168         i915_gem_object_unpin_pages(obj);
16169         if (ret)
16170                 return ret;
16171
16172         fb_obj_bump_render_priority(obj);
16173         i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
16174
16175         if (!new_plane_state->uapi.fence) { /* implicit fencing */
16176                 struct dma_fence *fence;
16177
16178                 ret = i915_sw_fence_await_reservation(&state->commit_ready,
16179                                                       obj->base.resv, NULL,
16180                                                       false, I915_FENCE_TIMEOUT,
16181                                                       GFP_KERNEL);
16182                 if (ret < 0)
16183                         goto unpin_fb;
16184
16185                 fence = dma_resv_get_excl_rcu(obj->base.resv);
16186                 if (fence) {
16187                         add_rps_boost_after_vblank(new_plane_state->hw.crtc,
16188                                                    fence);
16189                         dma_fence_put(fence);
16190                 }
16191         } else {
16192                 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
16193                                            new_plane_state->uapi.fence);
16194         }
16195
16196         /*
16197          * We declare pageflips to be interactive and so merit a small bias
16198          * towards upclocking to deliver the frame on time. By only changing
16199          * the RPS thresholds to sample more regularly and aim for higher
16200          * clocks we can hopefully deliver low power workloads (like kodi)
16201          * that are not quite steady state without resorting to forcing
16202          * maximum clocks following a vblank miss (see do_rps_boost()).
16203          */
16204         if (!state->rps_interactive) {
16205                 intel_rps_mark_interactive(&dev_priv->gt.rps, true);
16206                 state->rps_interactive = true;
16207         }
16208
16209         return 0;
16210
16211 unpin_fb:
16212         intel_plane_unpin_fb(new_plane_state);
16213
16214         return ret;
16215 }
16216
16217 /**
16218  * intel_cleanup_plane_fb - Cleans up an fb after plane use
16219  * @plane: drm plane to clean up for
16220  * @_old_plane_state: the state from the previous modeset
16221  *
16222  * Cleans up a framebuffer that has just been removed from a plane.
16223  */
16224 void
16225 intel_cleanup_plane_fb(struct drm_plane *plane,
16226                        struct drm_plane_state *_old_plane_state)
16227 {
16228         struct intel_plane_state *old_plane_state =
16229                 to_intel_plane_state(_old_plane_state);
16230         struct intel_atomic_state *state =
16231                 to_intel_atomic_state(old_plane_state->uapi.state);
16232         struct drm_i915_private *dev_priv = to_i915(plane->dev);
16233         struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
16234
16235         if (!obj)
16236                 return;
16237
16238         if (state->rps_interactive) {
16239                 intel_rps_mark_interactive(&dev_priv->gt.rps, false);
16240                 state->rps_interactive = false;
16241         }
16242
16243         /* Should only be called after a successful intel_prepare_plane_fb()! */
16244         intel_plane_unpin_fb(old_plane_state);
16245 }
16246
16247 /**
16248  * intel_plane_destroy - destroy a plane
16249  * @plane: plane to destroy
16250  *
16251  * Common destruction function for all types of planes (primary, cursor,
16252  * sprite).
16253  */
16254 void intel_plane_destroy(struct drm_plane *plane)
16255 {
16256         drm_plane_cleanup(plane);
16257         kfree(to_intel_plane(plane));
16258 }
16259
16260 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
16261                                             u32 format, u64 modifier)
16262 {
16263         switch (modifier) {
16264         case DRM_FORMAT_MOD_LINEAR:
16265         case I915_FORMAT_MOD_X_TILED:
16266                 break;
16267         default:
16268                 return false;
16269         }
16270
16271         switch (format) {
16272         case DRM_FORMAT_C8:
16273         case DRM_FORMAT_RGB565:
16274         case DRM_FORMAT_XRGB1555:
16275         case DRM_FORMAT_XRGB8888:
16276                 return modifier == DRM_FORMAT_MOD_LINEAR ||
16277                         modifier == I915_FORMAT_MOD_X_TILED;
16278         default:
16279                 return false;
16280         }
16281 }
16282
16283 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
16284                                             u32 format, u64 modifier)
16285 {
16286         switch (modifier) {
16287         case DRM_FORMAT_MOD_LINEAR:
16288         case I915_FORMAT_MOD_X_TILED:
16289                 break;
16290         default:
16291                 return false;
16292         }
16293
16294         switch (format) {
16295         case DRM_FORMAT_C8:
16296         case DRM_FORMAT_RGB565:
16297         case DRM_FORMAT_XRGB8888:
16298         case DRM_FORMAT_XBGR8888:
16299         case DRM_FORMAT_ARGB8888:
16300         case DRM_FORMAT_ABGR8888:
16301         case DRM_FORMAT_XRGB2101010:
16302         case DRM_FORMAT_XBGR2101010:
16303         case DRM_FORMAT_ARGB2101010:
16304         case DRM_FORMAT_ABGR2101010:
16305         case DRM_FORMAT_XBGR16161616F:
16306                 return modifier == DRM_FORMAT_MOD_LINEAR ||
16307                         modifier == I915_FORMAT_MOD_X_TILED;
16308         default:
16309                 return false;
16310         }
16311 }
16312
16313 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
16314                                               u32 format, u64 modifier)
16315 {
16316         return modifier == DRM_FORMAT_MOD_LINEAR &&
16317                 format == DRM_FORMAT_ARGB8888;
16318 }
16319
/* Plane vfuncs using the gen4+ (i965) format/modifier checks. */
static const struct drm_plane_funcs i965_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i965_plane_format_mod_supported,
};
16328
/* Plane vfuncs using the gen2/3 (i8xx) format/modifier checks. */
static const struct drm_plane_funcs i8xx_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i8xx_plane_format_mod_supported,
};
16337
/*
 * Legacy cursor fastpath: service a drm .update_plane call on the cursor
 * without building a full atomic commit.
 *
 * Falls back to the generic atomic slowpath
 * (drm_atomic_helper_update_plane()) whenever the update could interact
 * with a modeset, a pending commit, or anything that affects watermarks.
 * On the fastpath it duplicates the plane state, checks it, pins the new
 * fb, swaps plane->base.state in place and programs the plane directly.
 *
 * Returns 0 on success or a negative errno.
 */
static int
intel_legacy_cursor_update(struct drm_plane *_plane,
			   struct drm_crtc *_crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   u32 src_x, u32 src_y,
			   u32 src_w, u32 src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_plane *plane = to_intel_plane(_plane);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->base.state);
	struct intel_plane_state *new_plane_state;
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_crtc_state *new_crtc_state;
	int ret;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
	    crtc_state->update_pipe)
		goto slow;

	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane.  This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->uapi.commit &&
	    !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->uapi.crtc != &crtc->base ||
	    old_plane_state->uapi.src_w != src_w ||
	    old_plane_state->uapi.src_h != src_h ||
	    old_plane_state->uapi.crtc_w != crtc_w ||
	    old_plane_state->uapi.crtc_h != crtc_h ||
	    !old_plane_state->uapi.fb != !fb)
		goto slow;

	new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
	if (!new_plane_state)
		return -ENOMEM;

	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
	if (!new_crtc_state) {
		ret = -ENOMEM;
		goto out_free;
	}

	drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);

	new_plane_state->uapi.src_x = src_x;
	new_plane_state->uapi.src_y = src_y;
	new_plane_state->uapi.src_w = src_w;
	new_plane_state->uapi.src_h = src_h;
	new_plane_state->uapi.crtc_x = crtc_x;
	new_plane_state->uapi.crtc_y = crtc_y;
	new_plane_state->uapi.crtc_w = crtc_w;
	new_plane_state->uapi.crtc_h = crtc_h;

	intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);

	/* Validate against the *current* crtc state; we never swap it. */
	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
						  old_plane_state, new_plane_state);
	if (ret)
		goto out_free;

	ret = intel_plane_pin_fb(new_plane_state);
	if (ret)
		goto out_free;

	intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
				ORIGIN_FLIP);
	intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
				to_intel_frontbuffer(new_plane_state->hw.fb),
				plane->frontbuffer_bit);

	/* Swap plane state */
	plane->base.state = &new_plane_state->uapi;

	/*
	 * We cannot swap crtc_state as it may be in use by an atomic commit or
	 * page flip that's running simultaneously. If we swap crtc_state and
	 * destroy the old state, we will cause a use-after-free there.
	 *
	 * Only update active_planes, which is needed for our internal
	 * bookkeeping. Either value will do the right thing when updating
	 * planes atomically. If the cursor was part of the atomic update then
	 * we would have taken the slowpath.
	 */
	crtc_state->active_planes = new_crtc_state->active_planes;

	if (new_plane_state->uapi.visible)
		intel_update_plane(plane, crtc_state, new_plane_state);
	else
		intel_disable_plane(plane, crtc_state);

	intel_plane_unpin_fb(old_plane_state);

out_free:
	/* The duplicated crtc state was only needed for the check above. */
	if (new_crtc_state)
		intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
	/*
	 * On failure the new plane state was never installed, so free it;
	 * on success the old state was swapped out above and is freed here.
	 */
	if (ret)
		intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
	else
		intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
	return ret;

slow:
	return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
16462
/*
 * Cursor plane vtable: .update_plane goes through the legacy cursor
 * fastpath above instead of the generic atomic helper.
 */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};
16471
16472 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
16473                                enum i9xx_plane_id i9xx_plane)
16474 {
16475         if (!HAS_FBC(dev_priv))
16476                 return false;
16477
16478         if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
16479                 return i9xx_plane == PLANE_A; /* tied to pipe A */
16480         else if (IS_IVYBRIDGE(dev_priv))
16481                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
16482                         i9xx_plane == PLANE_C;
16483         else if (INTEL_GEN(dev_priv) >= 4)
16484                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
16485         else
16486                 return i9xx_plane == PLANE_A;
16487 }
16488
/*
 * Create and register the primary plane for the given pipe on pre-gen9
 * hardware (gen9+ is delegated to skl_universal_plane_create()).
 *
 * Selects the pixel format list, plane vtable hooks, min cdclk callback
 * and rotation/zpos properties appropriate for the platform generation.
 *
 * Returns the new plane or an ERR_PTR on failure.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *plane;
	const struct drm_plane_funcs *plane_funcs;
	unsigned int supported_rotations;
	unsigned int possible_crtcs;
	const u32 *formats;
	int num_formats;
	int ret, zpos;

	if (INTEL_GEN(dev_priv) >= 9)
		return skl_universal_plane_create(dev_priv, pipe,
						  PLANE_PRIMARY);

	plane = intel_plane_alloc();
	if (IS_ERR(plane))
		return plane;

	plane->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
	else
		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
	plane->id = PLANE_PRIMARY;
	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);

	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
	if (plane->has_fbc) {
		struct intel_fbc *fbc = &dev_priv->fbc;

		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
	}

	/* Pixel format list depends on the display generation. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		formats = vlv_primary_formats;
		num_formats = ARRAY_SIZE(vlv_primary_formats);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		/*
		 * WaFP16GammaEnabling:ivb
		 * "Workaround : When using the 64-bit format, the plane
		 *  output on each color channel has one quarter amplitude.
		 *  It can be brought up to full amplitude by using pipe
		 *  gamma correction or pipe color space conversion to
		 *  multiply the plane output by four."
		 *
		 * There is no dedicated plane gamma for the primary plane,
		 * and using the pipe gamma/csc could conflict with other
		 * planes, so we choose not to expose fp16 on IVB primary
		 * planes. HSW primary planes no longer have this problem.
		 */
		if (IS_IVYBRIDGE(dev_priv)) {
			formats = ivb_primary_formats;
			num_formats = ARRAY_SIZE(ivb_primary_formats);
		} else {
			formats = i965_primary_formats;
			num_formats = ARRAY_SIZE(i965_primary_formats);
		}
	} else {
		formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
	}

	if (INTEL_GEN(dev_priv) >= 4)
		plane_funcs = &i965_plane_funcs;
	else
		plane_funcs = &i8xx_plane_funcs;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		plane->min_cdclk = vlv_plane_min_cdclk;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		plane->min_cdclk = hsw_plane_min_cdclk;
	else if (IS_IVYBRIDGE(dev_priv))
		plane->min_cdclk = ivb_plane_min_cdclk;
	else
		plane->min_cdclk = i9xx_plane_min_cdclk;

	plane->max_stride = i9xx_plane_max_stride;
	plane->update_plane = i9xx_update_plane;
	plane->disable_plane = i9xx_disable_plane;
	plane->get_hw_state = i9xx_plane_get_hw_state;
	plane->check_plane = i9xx_plane_check;

	possible_crtcs = BIT(pipe);

	/*
	 * Name the plane after the pipe on platforms where primary planes
	 * are fixed to a pipe (gen5+/G4X), otherwise after the hw plane id.
	 */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       possible_crtcs, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       possible_crtcs, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c",
					       plane_name(plane->i9xx_plane));
	if (ret)
		goto fail;

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
	} else {
		supported_rotations = DRM_MODE_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&plane->base,
						   DRM_MODE_ROTATE_0,
						   supported_rotations);

	/* Primary plane sits at the bottom of the fixed z-order. */
	zpos = 0;
	drm_plane_create_zpos_immutable_property(&plane->base, zpos);

	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);

	return plane;

fail:
	intel_plane_free(plane);

	return ERR_PTR(ret);
}
16624
/*
 * Create and register the cursor plane for the given pipe, selecting the
 * i845/i865 or i9xx+ cursor hooks as appropriate.
 *
 * Returns the new plane or an ERR_PTR on failure.
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	unsigned int possible_crtcs;
	struct intel_plane *cursor;
	int ret, zpos;

	cursor = intel_plane_alloc();
	if (IS_ERR(cursor))
		return cursor;

	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->max_stride = i845_cursor_max_stride;
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->max_stride = i9xx_cursor_max_stride;
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	/* Force the first register write by making the cached values invalid. */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	possible_crtcs = BIT(pipe);

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       possible_crtcs, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	/* Cursor is always on top: above the primary and all sprites. */
	zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
	drm_plane_create_zpos_immutable_property(&cursor->base, zpos);

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	intel_plane_free(cursor);

	return ERR_PTR(ret);
}
16692
/*
 * drm_crtc_funcs entries shared by all platform variants below; each
 * variant only adds its own vblank counter/enable/disable callbacks.
 */
#define INTEL_CRTC_FUNCS \
	.gamma_set = drm_atomic_helper_legacy_gamma_set, \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources
16703
/* Used for non-GMCH platforms with INTEL_GEN >= 8 (see intel_crtc_init()). */
static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
};
16711
/* Used for non-GMCH platforms below gen8 (see intel_crtc_init()). */
static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
};
16719
/* Used for CHV/VLV/G4X GMCH platforms (see intel_crtc_init()). */
static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
};
16727
/* Used for gen4 GMCH platforms (see intel_crtc_init()). */
static const struct drm_crtc_funcs i965_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
};
16735
/* Used for I945GM/I915GM, which need their own vblank enable hooks. */
static const struct drm_crtc_funcs i915gm_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i915gm_enable_vblank,
	.disable_vblank = i915gm_disable_vblank,
};
16743
/* Used for the remaining gen3 GMCH platforms (see intel_crtc_init()). */
static const struct drm_crtc_funcs i915_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
};
16751
/* Used for gen2 (see intel_crtc_init()). */
static const struct drm_crtc_funcs i8xx_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	/* no hw vblank counter */
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
};
16759
16760 static struct intel_crtc *intel_crtc_alloc(void)
16761 {
16762         struct intel_crtc_state *crtc_state;
16763         struct intel_crtc *crtc;
16764
16765         crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
16766         if (!crtc)
16767                 return ERR_PTR(-ENOMEM);
16768
16769         crtc_state = intel_crtc_state_alloc(crtc);
16770         if (!crtc_state) {
16771                 kfree(crtc);
16772                 return ERR_PTR(-ENOMEM);
16773         }
16774
16775         crtc->base.state = &crtc_state->uapi;
16776         crtc->config = crtc_state;
16777
16778         return crtc;
16779 }
16780
/* Free a crtc allocated by intel_crtc_alloc(), including its state. */
static void intel_crtc_free(struct intel_crtc *crtc)
{
	intel_crtc_destroy_state(&crtc->base, crtc->base.state);
	kfree(crtc);
}
16786
/*
 * Create and register the crtc for the given pipe along with its primary,
 * sprite and cursor planes, picking the platform-specific crtc vtable.
 *
 * Also records the pipe->crtc (and, pre-gen9, plane->crtc) lookup tables
 * in dev_priv. Returns 0 on success or a negative errno.
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary, *cursor;
	const struct drm_crtc_funcs *funcs;
	struct intel_crtc *crtc;
	int sprite, ret;

	crtc = intel_crtc_alloc();
	if (IS_ERR(crtc))
		return PTR_ERR(crtc);

	crtc->pipe = pipe;
	crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(cursor->id);

	/* Select the vblank hooks matching this platform. */
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv) ||
		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
			funcs = &g4x_crtc_funcs;
		else if (IS_GEN(dev_priv, 4))
			funcs = &i965_crtc_funcs;
		else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
			funcs = &i915gm_crtc_funcs;
		else if (IS_GEN(dev_priv, 3))
			funcs = &i915_crtc_funcs;
		else
			funcs = &i8xx_crtc_funcs;
	} else {
		if (INTEL_GEN(dev_priv) >= 8)
			funcs = &bdw_crtc_funcs;
		else
			funcs = &ilk_crtc_funcs;
	}

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
					&primary->base, &cursor->base,
					funcs, "pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/* Each pipe must be registered exactly once. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = crtc;

	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
	}

	intel_color_init(crtc);

	WARN_ON(drm_crtc_index(&crtc->base) != crtc->pipe);

	return 0;

fail:
	intel_crtc_free(crtc);

	return ret;
}
16874
16875 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
16876                                       struct drm_file *file)
16877 {
16878         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
16879         struct drm_crtc *drmmode_crtc;
16880         struct intel_crtc *crtc;
16881
16882         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
16883         if (!drmmode_crtc)
16884                 return -ENOENT;
16885
16886         crtc = to_intel_crtc(drmmode_crtc);
16887         pipe_from_crtc_id->pipe = crtc->pipe;
16888
16889         return 0;
16890 }
16891
16892 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
16893 {
16894         struct drm_device *dev = encoder->base.dev;
16895         struct intel_encoder *source_encoder;
16896         u32 possible_clones = 0;
16897
16898         for_each_intel_encoder(dev, source_encoder) {
16899                 if (encoders_cloneable(encoder, source_encoder))
16900                         possible_clones |= drm_encoder_mask(&source_encoder->base);
16901         }
16902
16903         return possible_clones;
16904 }
16905
16906 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
16907 {
16908         struct drm_device *dev = encoder->base.dev;
16909         struct intel_crtc *crtc;
16910         u32 possible_crtcs = 0;
16911
16912         for_each_intel_crtc(dev, crtc) {
16913                 if (encoder->pipe_mask & BIT(crtc->pipe))
16914                         possible_crtcs |= drm_crtc_mask(&crtc->base);
16915         }
16916
16917         return possible_crtcs;
16918 }
16919
16920 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
16921 {
16922         if (!IS_MOBILE(dev_priv))
16923                 return false;
16924
16925         if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
16926                 return false;
16927
16928         if (IS_GEN(dev_priv, 5) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
16929                 return false;
16930
16931         return true;
16932 }
16933
/*
 * Decide whether a CRT (VGA) connector should be registered on DDI
 * platforms (HSW/BDW/SKL-era), based on platform, strap registers and VBT.
 */
static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return false;

	/* ULT parts have no CRT output. */
	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		return false;

	if (HAS_PCH_LPT_H(dev_priv) &&
	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}
16955
16956 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
16957 {
16958         int pps_num;
16959         int pps_idx;
16960
16961         if (HAS_DDI(dev_priv))
16962                 return;
16963         /*
16964          * This w/a is needed at least on CPT/PPT, but to be sure apply it
16965          * everywhere where registers can be write protected.
16966          */
16967         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
16968                 pps_num = 2;
16969         else
16970                 pps_num = 1;
16971
16972         for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
16973                 u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
16974
16975                 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
16976                 intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
16977         }
16978 }
16979
/*
 * Pick the panel power sequencer MMIO base for this platform and apply
 * the register unlock workaround.
 */
static void intel_pps_init(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
		dev_priv->pps_mmio_base = PCH_PPS_BASE;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->pps_mmio_base = VLV_PPS_BASE;
	else
		dev_priv->pps_mmio_base = PPS_BASE;

	intel_pps_unlock_regs_wa(dev_priv);
}
16991
16992 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
16993 {
16994         struct intel_encoder *encoder;
16995         bool dpd_is_edp = false;
16996
16997         intel_pps_init(dev_priv);
16998
16999         if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
17000                 return;
17001
17002         if (INTEL_GEN(dev_priv) >= 12) {
17003                 intel_ddi_init(dev_priv, PORT_A);
17004                 intel_ddi_init(dev_priv, PORT_B);
17005                 intel_ddi_init(dev_priv, PORT_D);
17006                 intel_ddi_init(dev_priv, PORT_E);
17007                 intel_ddi_init(dev_priv, PORT_F);
17008                 intel_ddi_init(dev_priv, PORT_G);
17009                 intel_ddi_init(dev_priv, PORT_H);
17010                 intel_ddi_init(dev_priv, PORT_I);
17011                 icl_dsi_init(dev_priv);
17012         } else if (IS_ELKHARTLAKE(dev_priv)) {
17013                 intel_ddi_init(dev_priv, PORT_A);
17014                 intel_ddi_init(dev_priv, PORT_B);
17015                 intel_ddi_init(dev_priv, PORT_C);
17016                 intel_ddi_init(dev_priv, PORT_D);
17017                 icl_dsi_init(dev_priv);
17018         } else if (IS_GEN(dev_priv, 11)) {
17019                 intel_ddi_init(dev_priv, PORT_A);
17020                 intel_ddi_init(dev_priv, PORT_B);
17021                 intel_ddi_init(dev_priv, PORT_C);
17022                 intel_ddi_init(dev_priv, PORT_D);
17023                 intel_ddi_init(dev_priv, PORT_E);
17024                 /*
17025                  * On some ICL SKUs port F is not present. No strap bits for
17026                  * this, so rely on VBT.
17027                  * Work around broken VBTs on SKUs known to have no port F.
17028                  */
17029                 if (IS_ICL_WITH_PORT_F(dev_priv) &&
17030                     intel_bios_is_port_present(dev_priv, PORT_F))
17031                         intel_ddi_init(dev_priv, PORT_F);
17032
17033                 icl_dsi_init(dev_priv);
17034         } else if (IS_GEN9_LP(dev_priv)) {
17035                 /*
17036                  * FIXME: Broxton doesn't support port detection via the
17037                  * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
17038                  * detect the ports.
17039                  */
17040                 intel_ddi_init(dev_priv, PORT_A);
17041                 intel_ddi_init(dev_priv, PORT_B);
17042                 intel_ddi_init(dev_priv, PORT_C);
17043
17044                 vlv_dsi_init(dev_priv);
17045         } else if (HAS_DDI(dev_priv)) {
17046                 int found;
17047
17048                 if (intel_ddi_crt_present(dev_priv))
17049                         intel_crt_init(dev_priv);
17050
17051                 /*
17052                  * Haswell uses DDI functions to detect digital outputs.
17053                  * On SKL pre-D0 the strap isn't connected, so we assume
17054                  * it's there.
17055                  */
17056                 found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
17057                 /* WaIgnoreDDIAStrap: skl */
17058                 if (found || IS_GEN9_BC(dev_priv))
17059                         intel_ddi_init(dev_priv, PORT_A);
17060
17061                 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
17062                  * register */
17063                 found = intel_de_read(dev_priv, SFUSE_STRAP);
17064
17065                 if (found & SFUSE_STRAP_DDIB_DETECTED)
17066                         intel_ddi_init(dev_priv, PORT_B);
17067                 if (found & SFUSE_STRAP_DDIC_DETECTED)
17068                         intel_ddi_init(dev_priv, PORT_C);
17069                 if (found & SFUSE_STRAP_DDID_DETECTED)
17070                         intel_ddi_init(dev_priv, PORT_D);
17071                 if (found & SFUSE_STRAP_DDIF_DETECTED)
17072                         intel_ddi_init(dev_priv, PORT_F);
17073                 /*
17074                  * On SKL we don't have a way to detect DDI-E so we rely on VBT.
17075                  */
17076                 if (IS_GEN9_BC(dev_priv) &&
17077                     intel_bios_is_port_present(dev_priv, PORT_E))
17078                         intel_ddi_init(dev_priv, PORT_E);
17079
17080         } else if (HAS_PCH_SPLIT(dev_priv)) {
17081                 int found;
17082
17083                 /*
17084                  * intel_edp_init_connector() depends on this completing first,
17085                  * to prevent the registration of both eDP and LVDS and the
17086                  * incorrect sharing of the PPS.
17087                  */
17088                 intel_lvds_init(dev_priv);
17089                 intel_crt_init(dev_priv);
17090
17091                 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
17092
17093                 if (ilk_has_edp_a(dev_priv))
17094                         intel_dp_init(dev_priv, DP_A, PORT_A);
17095
17096                 if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
17097                         /* PCH SDVOB multiplex with HDMIB */
17098                         found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
17099                         if (!found)
17100                                 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
17101                         if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
17102                                 intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
17103                 }
17104
17105                 if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
17106                         intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
17107
17108                 if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
17109                         intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
17110
17111                 if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
17112                         intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
17113
17114                 if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
17115                         intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
17116         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
17117                 bool has_edp, has_port;
17118
17119                 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
17120                         intel_crt_init(dev_priv);
17121
17122                 /*
17123                  * The DP_DETECTED bit is the latched state of the DDC
17124                  * SDA pin at boot. However since eDP doesn't require DDC
17125                  * (no way to plug in a DP->HDMI dongle) the DDC pins for
17126                  * eDP ports may have been muxed to an alternate function.
17127                  * Thus we can't rely on the DP_DETECTED bit alone to detect
17128                  * eDP ports. Consult the VBT as well as DP_DETECTED to
17129                  * detect eDP ports.
17130                  *
17131                  * Sadly the straps seem to be missing sometimes even for HDMI
17132                  * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
17133                  * and VBT for the presence of the port. Additionally we can't
17134                  * trust the port type the VBT declares as we've seen at least
17135                  * HDMI ports that the VBT claim are DP or eDP.
17136                  */
17137                 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
17138                 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
17139                 if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
17140                         has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
17141                 if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
17142                         intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
17143
17144                 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
17145                 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
17146                 if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
17147                         has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
17148                 if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
17149                         intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
17150
17151                 if (IS_CHERRYVIEW(dev_priv)) {
17152                         /*
17153                          * eDP not supported on port D,
17154                          * so no need to worry about it
17155                          */
17156                         has_port = intel_bios_is_port_present(dev_priv, PORT_D);
17157                         if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
17158                                 intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
17159                         if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
17160                                 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
17161                 }
17162
17163                 vlv_dsi_init(dev_priv);
17164         } else if (IS_PINEVIEW(dev_priv)) {
17165                 intel_lvds_init(dev_priv);
17166                 intel_crt_init(dev_priv);
17167         } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
17168                 bool found = false;
17169
17170                 if (IS_MOBILE(dev_priv))
17171                         intel_lvds_init(dev_priv);
17172
17173                 intel_crt_init(dev_priv);
17174
17175                 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
17176                         drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
17177                         found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
17178                         if (!found && IS_G4X(dev_priv)) {
17179                                 drm_dbg_kms(&dev_priv->drm,
17180                                             "probing HDMI on SDVOB\n");
17181                                 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
17182                         }
17183
17184                         if (!found && IS_G4X(dev_priv))
17185                                 intel_dp_init(dev_priv, DP_B, PORT_B);
17186                 }
17187
17188                 /* Before G4X SDVOC doesn't have its own detect register */
17189
17190                 if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
17191                         drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
17192                         found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
17193                 }
17194
17195                 if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
17196
17197                         if (IS_G4X(dev_priv)) {
17198                                 drm_dbg_kms(&dev_priv->drm,
17199                                             "probing HDMI on SDVOC\n");
17200                                 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
17201                         }
17202                         if (IS_G4X(dev_priv))
17203                                 intel_dp_init(dev_priv, DP_C, PORT_C);
17204                 }
17205
17206                 if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
17207                         intel_dp_init(dev_priv, DP_D, PORT_D);
17208
17209                 if (SUPPORTS_TV(dev_priv))
17210                         intel_tv_init(dev_priv);
17211         } else if (IS_GEN(dev_priv, 2)) {
17212                 if (IS_I85X(dev_priv))
17213                         intel_lvds_init(dev_priv);
17214
17215                 intel_crt_init(dev_priv);
17216                 intel_dvo_init(dev_priv);
17217         }
17218
17219         intel_psr_init(dev_priv);
17220
17221         for_each_intel_encoder(&dev_priv->drm, encoder) {
17222                 encoder->base.possible_crtcs =
17223                         intel_encoder_possible_crtcs(encoder);
17224                 encoder->base.possible_clones =
17225                         intel_encoder_possible_clones(encoder);
17226         }
17227
17228         intel_init_pch_refclk(dev_priv);
17229
17230         drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
17231 }
17232
17233 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
17234 {
17235         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
17236
17237         drm_framebuffer_cleanup(fb);
17238         intel_frontbuffer_put(intel_fb->frontbuffer);
17239
17240         kfree(intel_fb);
17241 }
17242
17243 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
17244                                                 struct drm_file *file,
17245                                                 unsigned int *handle)
17246 {
17247         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
17248         struct drm_i915_private *i915 = to_i915(obj->base.dev);
17249
17250         if (obj->userptr.mm) {
17251                 drm_dbg(&i915->drm,
17252                         "attempting to use a userptr for a framebuffer, denied\n");
17253                 return -EINVAL;
17254         }
17255
17256         return drm_gem_handle_create(file, &obj->base, handle);
17257 }
17258
17259 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
17260                                         struct drm_file *file,
17261                                         unsigned flags, unsigned color,
17262                                         struct drm_clip_rect *clips,
17263                                         unsigned num_clips)
17264 {
17265         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
17266
17267         i915_gem_object_flush_if_display(obj);
17268         intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
17269
17270         return 0;
17271 }
17272
/* Framebuffer vfuncs used for all userspace-created framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
17278
/*
 * intel_framebuffer_init - validate an addfb(2) request and set up the fb
 * @intel_fb: framebuffer wrapper to fill in
 * @obj: GEM object backing all planes of the framebuffer
 * @mode_cmd: userspace request; modifier[0] may be rewritten for legacy addfb
 *
 * Takes a frontbuffer reference on @obj, validates the requested
 * format/modifier/pitch/offset combination against the object's tiling
 * state and per-platform limits, then registers the fb with the DRM core.
 *
 * Returns 0 on success or a negative errno; on failure the frontbuffer
 * reference taken here is dropped again.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 max_stride;
	unsigned int tiling, stride;
	int ret = -EINVAL;
	int i;

	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
	if (!intel_fb->frontbuffer)
		return -ENOMEM;

	/* Snapshot the object's tiling parameters under the object lock. */
	i915_gem_object_lock(obj);
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			drm_dbg_kms(&dev_priv->drm,
				    "tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		/* Legacy addfb: derive the modifier from the tiling mode. */
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			drm_dbg_kms(&dev_priv->drm,
				    "No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	/* The format/modifier pair must be accepted by at least one plane. */
	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		struct drm_format_name_buf format_name;

		drm_dbg_kms(&dev_priv->drm,
			    "unsupported pixel format %s / modifier 0x%llx\n",
			    drm_get_format_name(mode_cmd->pixel_format,
						&format_name),
			    mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		drm_dbg_kms(&dev_priv->drm,
			    "tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	/* Enforce the per-platform stride limit for this format/modifier. */
	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
					 mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > max_stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "%s pitch (%u) must be at most %d\n",
			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			    "tiled" : "linear",
			    mode_cmd->pitches[0], max_stride);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		drm_dbg_kms(&dev_priv->drm,
			    "pitch (%d) must match tiling stride (%d)\n",
			    mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "plane 0 offset (0x%08x) must be 0\n",
			    mode_cmd->offsets[0]);
		goto err;
	}

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	/* Per-plane validation: one backing object, aligned pitches. */
	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		/* All planes must share the same backing object. */
		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
				    i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);
		if (fb->pitches[i] & (stride_alignment - 1)) {
			drm_dbg_kms(&dev_priv->drm,
				    "plane %d pitch (%d) must be at least %u byte aligned\n",
				    i, fb->pitches[i], stride_alignment);
			goto err;
		}

		/* gen12 CCS aux planes require one exact pitch value. */
		if (is_gen12_ccs_plane(fb, i)) {
			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);

			if (fb->pitches[i] != ccs_aux_stride) {
				drm_dbg_kms(&dev_priv->drm,
					    "ccs aux plane %d pitch (%d) must be %d\n",
					    i,
					    fb->pitches[i], ccs_aux_stride);
				goto err;
			}
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	/* Drop the frontbuffer reference taken at the top on any failure. */
	intel_frontbuffer_put(intel_fb->frontbuffer);
	return ret;
}
17424
17425 static struct drm_framebuffer *
17426 intel_user_framebuffer_create(struct drm_device *dev,
17427                               struct drm_file *filp,
17428                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
17429 {
17430         struct drm_framebuffer *fb;
17431         struct drm_i915_gem_object *obj;
17432         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
17433
17434         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
17435         if (!obj)
17436                 return ERR_PTR(-ENOENT);
17437
17438         fb = intel_framebuffer_create(obj, &mode_cmd);
17439         i915_gem_object_put(obj);
17440
17441         return fb;
17442 }
17443
17444 static enum drm_mode_status
17445 intel_mode_valid(struct drm_device *dev,
17446                  const struct drm_display_mode *mode)
17447 {
17448         struct drm_i915_private *dev_priv = to_i915(dev);
17449         int hdisplay_max, htotal_max;
17450         int vdisplay_max, vtotal_max;
17451
17452         /*
17453          * Can't reject DBLSCAN here because Xorg ddxen can add piles
17454          * of DBLSCAN modes to the output's mode list when they detect
17455          * the scaling mode property on the connector. And they don't
17456          * ask the kernel to validate those modes in any way until
17457          * modeset time at which point the client gets a protocol error.
17458          * So in order to not upset those clients we silently ignore the
17459          * DBLSCAN flag on such connectors. For other connectors we will
17460          * reject modes with the DBLSCAN flag in encoder->compute_config().
17461          * And we always reject DBLSCAN modes in connector->mode_valid()
17462          * as we never want such modes on the connector's mode list.
17463          */
17464
17465         if (mode->vscan > 1)
17466                 return MODE_NO_VSCAN;
17467
17468         if (mode->flags & DRM_MODE_FLAG_HSKEW)
17469                 return MODE_H_ILLEGAL;
17470
17471         if (mode->flags & (DRM_MODE_FLAG_CSYNC |
17472                            DRM_MODE_FLAG_NCSYNC |
17473                            DRM_MODE_FLAG_PCSYNC))
17474                 return MODE_HSYNC;
17475
17476         if (mode->flags & (DRM_MODE_FLAG_BCAST |
17477                            DRM_MODE_FLAG_PIXMUX |
17478                            DRM_MODE_FLAG_CLKDIV2))
17479                 return MODE_BAD;
17480
17481         /* Transcoder timing limits */
17482         if (INTEL_GEN(dev_priv) >= 11) {
17483                 hdisplay_max = 16384;
17484                 vdisplay_max = 8192;
17485                 htotal_max = 16384;
17486                 vtotal_max = 8192;
17487         } else if (INTEL_GEN(dev_priv) >= 9 ||
17488                    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
17489                 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
17490                 vdisplay_max = 4096;
17491                 htotal_max = 8192;
17492                 vtotal_max = 8192;
17493         } else if (INTEL_GEN(dev_priv) >= 3) {
17494                 hdisplay_max = 4096;
17495                 vdisplay_max = 4096;
17496                 htotal_max = 8192;
17497                 vtotal_max = 8192;
17498         } else {
17499                 hdisplay_max = 2048;
17500                 vdisplay_max = 2048;
17501                 htotal_max = 4096;
17502                 vtotal_max = 4096;
17503         }
17504
17505         if (mode->hdisplay > hdisplay_max ||
17506             mode->hsync_start > htotal_max ||
17507             mode->hsync_end > htotal_max ||
17508             mode->htotal > htotal_max)
17509                 return MODE_H_ILLEGAL;
17510
17511         if (mode->vdisplay > vdisplay_max ||
17512             mode->vsync_start > vtotal_max ||
17513             mode->vsync_end > vtotal_max ||
17514             mode->vtotal > vtotal_max)
17515                 return MODE_V_ILLEGAL;
17516
17517         if (INTEL_GEN(dev_priv) >= 5) {
17518                 if (mode->hdisplay < 64 ||
17519                     mode->htotal - mode->hdisplay < 32)
17520                         return MODE_H_ILLEGAL;
17521
17522                 if (mode->vtotal - mode->vdisplay < 5)
17523                         return MODE_V_ILLEGAL;
17524         } else {
17525                 if (mode->htotal - mode->hdisplay < 32)
17526                         return MODE_H_ILLEGAL;
17527
17528                 if (mode->vtotal - mode->vdisplay < 3)
17529                         return MODE_V_ILLEGAL;
17530         }
17531
17532         return MODE_OK;
17533 }
17534
17535 enum drm_mode_status
17536 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
17537                                 const struct drm_display_mode *mode)
17538 {
17539         int plane_width_max, plane_height_max;
17540
17541         /*
17542          * intel_mode_valid() should be
17543          * sufficient on older platforms.
17544          */
17545         if (INTEL_GEN(dev_priv) < 9)
17546                 return MODE_OK;
17547
17548         /*
17549          * Most people will probably want a fullscreen
17550          * plane so let's not advertize modes that are
17551          * too big for that.
17552          */
17553         if (INTEL_GEN(dev_priv) >= 11) {
17554                 plane_width_max = 5120;
17555                 plane_height_max = 4320;
17556         } else {
17557                 plane_width_max = 5120;
17558                 plane_height_max = 4096;
17559         }
17560
17561         if (mode->hdisplay > plane_width_max)
17562                 return MODE_H_ILLEGAL;
17563
17564         if (mode->vdisplay > plane_height_max)
17565                 return MODE_V_ILLEGAL;
17566
17567         return MODE_OK;
17568 }
17569
/* Mode config vfuncs wiring the i915 atomic implementation into DRM. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
17581
/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 *
 * Selects the per-platform display vfuncs: pipe config readout, initial
 * plane config readout, CRTC clock computation and CRTC enable/disable,
 * plus the FDI link training and modeset-enable commit hooks.
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	intel_init_cdclk_hooks(dev_priv);

	/* Checked newest platform first; the first match wins. */
	if (INTEL_GEN(dev_priv) >= 9) {
		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skl_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
		dev_priv->display.crtc_enable = hsw_crtc_enable;
		dev_priv->display.crtc_disable = hsw_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			hsw_crtc_compute_clock;
		dev_priv->display.crtc_enable = hsw_crtc_enable;
		dev_priv->display.crtc_disable = hsw_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.get_pipe_config = ilk_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ilk_crtc_compute_clock;
		dev_priv->display.crtc_enable = ilk_crtc_enable;
		dev_priv->display.crtc_disable = ilk_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN(dev_priv, 2)) {
		/* gen3/4 (and anything else not matched above). */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		/* gen2 fallback. */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	/* FDI link training hook: only set on ilk/snb/ivb. */
	if (IS_GEN(dev_priv, 5)) {
		dev_priv->display.fdi_link_train = ilk_fdi_link_train;
	} else if (IS_GEN(dev_priv, 6)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	}

	if (INTEL_GEN(dev_priv) >= 9)
		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
	else
		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;

}
17672
17673 void intel_modeset_init_hw(struct drm_i915_private *i915)
17674 {
17675         struct intel_cdclk_state *cdclk_state =
17676                 to_intel_cdclk_state(i915->cdclk.obj.state);
17677
17678         intel_update_cdclk(i915);
17679         intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
17680         cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
17681 }
17682
17683 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
17684 {
17685         struct drm_plane *plane;
17686         struct drm_crtc *crtc;
17687
17688         drm_for_each_crtc(crtc, state->dev) {
17689                 struct drm_crtc_state *crtc_state;
17690
17691                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
17692                 if (IS_ERR(crtc_state))
17693                         return PTR_ERR(crtc_state);
17694         }
17695
17696         drm_for_each_plane(plane, state->dev) {
17697                 struct drm_plane_state *plane_state;
17698
17699                 plane_state = drm_atomic_get_plane_state(state, plane);
17700                 if (IS_ERR(plane_state))
17701                         return PTR_ERR(plane_state);
17702         }
17703
17704         return 0;
17705 }
17706
/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_i915_private *dev_priv)
{
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (WARN_ON(!state))
		return;

	intel_state = to_intel_atomic_state(state);

	drm_modeset_acquire_init(&ctx, 0);

	/* Re-entered after drm_modeset_backoff() on -EDEADLK. */
retry:
	state->acquire_ctx = &ctx;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = sanitize_watermarks_add_affected(state);
	if (ret)
		goto fail;

	ret = intel_atomic_check(&dev_priv->drm, state);
	if (ret)
		goto fail;

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, crtc);

		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

fail:
	/* On lock contention: reset the state and retry from the top. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/*
	 * If we fail here, it means that the hardware appears to be
	 * programmed in a way that shouldn't be possible, given our
	 * understanding of watermark requirements.  This might mean a
	 * mistake in the hardware readout code or a mistake in the
	 * watermark calculations for a given platform.  Raise a WARN
	 * so that this is noticeable.
	 *
	 * If this actually happens, we'll have to just leave the
	 * BIOS-programmed watermarks untouched and hope for the best.
	 */
	WARN(ret, "Could not determine valid watermarks for inherited state\n");

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
17791
17792 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
17793 {
17794         if (IS_GEN(dev_priv, 5)) {
17795                 u32 fdi_pll_clk =
17796                         intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
17797
17798                 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
17799         } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
17800                 dev_priv->fdi_pll_freq = 270000;
17801         } else {
17802                 return;
17803         }
17804
17805         drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
17806 }
17807
/*
 * Commit the state we just read out from the hardware back through the
 * atomic machinery once at init, so every active plane/crtc gets fully
 * recomputed software state before the first userspace modeset.
 *
 * Returns 0 on success or a negative error code from the atomic core.
 */
static int intel_initial_commit(struct drm_device *dev)
{
        struct drm_atomic_state *state = NULL;
        struct drm_modeset_acquire_ctx ctx;
        struct intel_crtc *crtc;
        int ret = 0;

        state = drm_atomic_state_alloc(dev);
        if (!state)
                return -ENOMEM;

        drm_modeset_acquire_init(&ctx, 0);

retry:
        state->acquire_ctx = &ctx;

        for_each_intel_crtc(dev, crtc) {
                struct intel_crtc_state *crtc_state =
                        intel_atomic_get_crtc_state(state, crtc);

                if (IS_ERR(crtc_state)) {
                        ret = PTR_ERR(crtc_state);
                        goto out;
                }

                if (crtc_state->hw.active) {
                        /* Pull all planes on active crtcs into the commit. */
                        ret = drm_atomic_add_affected_planes(state, &crtc->base);
                        if (ret)
                                goto out;

                        /*
                         * FIXME hack to force a LUT update to avoid the
                         * plane update forcing the pipe gamma on without
                         * having a proper LUT loaded. Remove once we
                         * have readout for pipe gamma enable.
                         */
                        crtc_state->uapi.color_mgmt_changed = true;

                        /*
                         * FIXME hack to force full modeset when DSC is being
                         * used.
                         *
                         * As long as we do not have full state readout and
                         * config comparison of crtc_state->dsc, we have no way
                         * to ensure reliable fastset. Remove once we have
                         * readout for DSC.
                         */
                        if (crtc_state->dsc.compression_enable) {
                                ret = drm_atomic_add_affected_connectors(state,
                                                                         &crtc->base);
                                if (ret)
                                        goto out;
                                crtc_state->uapi.mode_changed = true;
                                drm_dbg_kms(dev, "Force full modeset for DSC\n");
                        }
                }
        }

        ret = drm_atomic_commit(state);

out:
        /* Lock contention: drop everything and restart the whole commit. */
        if (ret == -EDEADLK) {
                drm_atomic_state_clear(state);
                drm_modeset_backoff(&ctx);
                goto retry;
        }

        drm_atomic_state_put(state);

        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);

        return ret;
}
17882
17883 static void intel_mode_config_init(struct drm_i915_private *i915)
17884 {
17885         struct drm_mode_config *mode_config = &i915->drm.mode_config;
17886
17887         drm_mode_config_init(&i915->drm);
17888         INIT_LIST_HEAD(&i915->global_obj_list);
17889
17890         mode_config->min_width = 0;
17891         mode_config->min_height = 0;
17892
17893         mode_config->preferred_depth = 24;
17894         mode_config->prefer_shadow = 1;
17895
17896         mode_config->allow_fb_modifiers = true;
17897
17898         mode_config->funcs = &intel_mode_funcs;
17899
17900         /*
17901          * Maximum framebuffer dimensions, chosen to match
17902          * the maximum render engine surface size on gen4+.
17903          */
17904         if (INTEL_GEN(i915) >= 7) {
17905                 mode_config->max_width = 16384;
17906                 mode_config->max_height = 16384;
17907         } else if (INTEL_GEN(i915) >= 4) {
17908                 mode_config->max_width = 8192;
17909                 mode_config->max_height = 8192;
17910         } else if (IS_GEN(i915, 3)) {
17911                 mode_config->max_width = 4096;
17912                 mode_config->max_height = 4096;
17913         } else {
17914                 mode_config->max_width = 2048;
17915                 mode_config->max_height = 2048;
17916         }
17917
17918         if (IS_I845G(i915) || IS_I865G(i915)) {
17919                 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
17920                 mode_config->cursor_height = 1023;
17921         } else if (IS_GEN(i915, 2)) {
17922                 mode_config->cursor_width = 64;
17923                 mode_config->cursor_height = 64;
17924         } else {
17925                 mode_config->cursor_width = 256;
17926                 mode_config->cursor_height = 256;
17927         }
17928 }
17929
/*
 * Tear down the state set up by intel_mode_config_init(): the atomic
 * global objects first, then the core drm mode_config.
 */
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
        intel_atomic_global_obj_cleanup(i915);
        drm_mode_config_cleanup(&i915->drm);
}
17935
17936 static void plane_config_fini(struct intel_initial_plane_config *plane_config)
17937 {
17938         if (plane_config->fb) {
17939                 struct drm_framebuffer *fb = &plane_config->fb->base;
17940
17941                 /* We may only have the stub and not a full framebuffer */
17942                 if (drm_framebuffer_read_refcount(fb))
17943                         drm_framebuffer_put(fb);
17944                 else
17945                         kfree(fb);
17946         }
17947
17948         if (plane_config->vma)
17949                 i915_vma_put(plane_config->vma);
17950 }
17951
/*
 * Main display-side init: set up mode_config, cdclk/bandwidth tracking,
 * crtcs, outputs, then read out and sanitize the BIOS-programmed
 * hardware state and commit it once through the atomic machinery.
 *
 * Returns 0 on success or a negative error code on fatal init failure.
 */
int intel_modeset_init(struct drm_i915_private *i915)
{
        struct drm_device *dev = &i915->drm;
        enum pipe pipe;
        struct intel_crtc *crtc;
        int ret;

        /*
         * NOTE(review): the workqueue allocations below are not checked
         * for NULL — presumably OOM this early is considered fatal
         * anyway, but a failure here would oops later; confirm.
         */
        i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
        i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
                                        WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);

        intel_mode_config_init(i915);

        ret = intel_cdclk_init(i915);
        if (ret)
                return ret;

        ret = intel_bw_init(i915);
        if (ret)
                return ret;

        init_llist_head(&i915->atomic_helper.free_list);
        INIT_WORK(&i915->atomic_helper.free_work,
                  intel_atomic_helper_free_state_worker);

        intel_init_quirks(i915);

        intel_fbc_init(i915);

        intel_init_pm(i915);

        intel_panel_sanitize_ssc(i915);

        intel_gmbus_setup(i915);

        drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
                    INTEL_NUM_PIPES(i915),
                    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

        /* Only register crtcs when the display is actually usable. */
        if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
                for_each_pipe(i915, pipe) {
                        ret = intel_crtc_init(i915, pipe);
                        if (ret) {
                                intel_mode_config_cleanup(i915);
                                return ret;
                        }
                }
        }

        intel_shared_dpll_init(dev);
        intel_update_fdi_pll_freq(i915);

        intel_update_czclk(i915);
        intel_modeset_init_hw(i915);

        intel_hdcp_component_init(i915);

        if (i915->max_cdclk_freq == 0)
                intel_update_max_cdclk(i915);

        /* Just disable it once at startup */
        intel_vga_disable(i915);
        intel_setup_outputs(i915);

        /* Read out and sanitize the state the BIOS left behind. */
        drm_modeset_lock_all(dev);
        intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
        drm_modeset_unlock_all(dev);

        for_each_intel_crtc(dev, crtc) {
                struct intel_initial_plane_config plane_config = {};

                if (!crtc->active)
                        continue;

                /*
                 * Note that reserving the BIOS fb up front prevents us
                 * from stuffing other stolen allocations like the ring
                 * on top.  This prevents some ugliness at boot time, and
                 * can even allow for smooth boot transitions if the BIOS
                 * fb is large enough for the active pipe configuration.
                 */
                i915->display.get_initial_plane_config(crtc, &plane_config);

                /*
                 * If the fb is shared between multiple heads, we'll
                 * just get the first one.
                 */
                intel_find_initial_plane_obj(crtc, &plane_config);

                plane_config_fini(&plane_config);
        }

        /*
         * Make sure hardware watermarks really match the state we read out.
         * Note that we need to do this after reconstructing the BIOS fb's
         * since the watermark calculation done here will use pstate->fb.
         */
        if (!HAS_GMCH(i915))
                sanitize_watermarks(i915);

        /*
         * Force all active planes to recompute their states. So that on
         * mode_setcrtc after probe, all the intel_plane_state variables
         * are already calculated and there is no assert_plane warnings
         * during bootup.
         */
        ret = intel_initial_commit(dev);
        if (ret)
                drm_dbg_kms(&i915->drm, "Initial commit in probe failed.\n");

        /* An initial-commit failure is deliberately not fatal to probe. */
        return 0;
}
18064
/*
 * Force-enable a pipe on i830 with a fixed 640x480@60 VGA timing.
 * Used by the i830 "both pipes always on" quirk; programs the DPLL,
 * pipe timings and PIPECONF directly, bypassing the atomic paths.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
        /* 640x480@60Hz, ~25175 kHz */
        struct dpll clock = {
                .m1 = 18,
                .m2 = 7,
                .p1 = 13,
                .p2 = 4,
                .n = 2,
        };
        u32 dpll, fp;
        int i;

        /* Sanity check that the dividers above produce the expected dotclock. */
        WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

        drm_dbg_kms(&dev_priv->drm,
                    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
                    pipe_name(pipe), clock.vco, clock.dot);

        fp = i9xx_dpll_compute_fp(&clock);
        dpll = DPLL_DVO_2X_MODE |
                DPLL_VGA_MODE_DIS |
                ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
                PLL_P2_DIVIDE_BY_4 |
                PLL_REF_INPUT_DREFCLK |
                DPLL_VCO_ENABLE;

        intel_de_write(dev_priv, FP0(pipe), fp);
        intel_de_write(dev_priv, FP1(pipe), fp);

        /* Standard 640x480@60 CRT timings, all values minus one. */
        intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
        intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
        intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
        intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
        intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
        intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
        intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

        /*
         * Apparently we need to have VGA mode enabled prior to changing
         * the P1/P2 dividers. Otherwise the DPLL will keep using the old
         * dividers, even though the register value does change.
         */
        intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
        intel_de_write(dev_priv, DPLL(pipe), dpll);

        /* Wait for the clocks to stabilize. */
        intel_de_posting_read(dev_priv, DPLL(pipe));
        udelay(150);

        /* The pixel multiplier can only be updated once the
         * DPLL is enabled and the clocks are stable.
         *
         * So write it again.
         */
        intel_de_write(dev_priv, DPLL(pipe), dpll);

        /* We do this three times for luck */
        for (i = 0; i < 3 ; i++) {
                intel_de_write(dev_priv, DPLL(pipe), dpll);
                intel_de_posting_read(dev_priv, DPLL(pipe));
                udelay(150); /* wait for warmup */
        }

        intel_de_write(dev_priv, PIPECONF(pipe),
                       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
        intel_de_posting_read(dev_priv, PIPECONF(pipe));

        /* Confirm the pipe actually started scanning out. */
        intel_wait_for_pipe_scanline_moving(crtc);
}
18136
/*
 * Force-disable a pipe enabled by the i830 quirk: turn off PIPECONF,
 * wait for the scanline to stop, then shut down the DPLL. Warns if any
 * plane or cursor is still enabled, since nothing should be scanning
 * out from this pipe at this point.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

        drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
                    pipe_name(pipe));

        WARN_ON(intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
        WARN_ON(intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);

        intel_de_write(dev_priv, PIPECONF(pipe), 0);
        intel_de_posting_read(dev_priv, PIPECONF(pipe));

        intel_wait_for_pipe_scanline_stopped(crtc);

        /* Disable the DPLL only after the pipe has fully stopped. */
        intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
        intel_de_posting_read(dev_priv, DPLL(pipe));
}
18158
18159 static void
18160 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
18161 {
18162         struct intel_crtc *crtc;
18163
18164         if (INTEL_GEN(dev_priv) >= 4)
18165                 return;
18166
18167         for_each_intel_crtc(&dev_priv->drm, crtc) {
18168                 struct intel_plane *plane =
18169                         to_intel_plane(crtc->base.primary);
18170                 struct intel_crtc *plane_crtc;
18171                 enum pipe pipe;
18172
18173                 if (!plane->get_hw_state(plane, &pipe))
18174                         continue;
18175
18176                 if (pipe == crtc->pipe)
18177                         continue;
18178
18179                 drm_dbg_kms(&dev_priv->drm,
18180                             "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
18181                             plane->base.base.id, plane->base.name);
18182
18183                 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
18184                 intel_plane_disable_noatomic(plane_crtc, plane);
18185         }
18186 }
18187
/*
 * Return true if at least one encoder is currently linked to this crtc.
 * The loop body returns on the first match, so this is just an
 * existence check.
 */
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct intel_encoder *encoder;

        for_each_encoder_on_crtc(dev, &crtc->base, encoder)
                return true;

        return false;
}
18198
/*
 * Return the first connector currently linked to this encoder, or NULL
 * if none. The loop body returns on the first match.
 */
static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
        struct drm_device *dev = encoder->base.dev;
        struct intel_connector *connector;

        for_each_connector_on_encoder(dev, &encoder->base, connector)
                return connector;

        return NULL;
}
18209
18210 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
18211                               enum pipe pch_transcoder)
18212 {
18213         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
18214                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
18215 }
18216
/*
 * Reset any non-zero frame start delay left behind by the BIOS to 0,
 * on both the CPU transcoder and (if present) the PCH transcoder.
 * The register holding the field differs per platform generation.
 */
static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        if (INTEL_GEN(dev_priv) >= 9 ||
            IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
                /* HSW+: delay lives in the per-transcoder chicken register. */
                i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
                u32 val;

                /* DSI transcoders have no CHICKEN_TRANS register. */
                if (transcoder_is_dsi(cpu_transcoder))
                        return;

                val = intel_de_read(dev_priv, reg);
                val &= ~HSW_FRAME_START_DELAY_MASK;
                val |= HSW_FRAME_START_DELAY(0);
                intel_de_write(dev_priv, reg, val);
        } else {
                /* Older platforms: delay field is in PIPECONF. */
                i915_reg_t reg = PIPECONF(cpu_transcoder);
                u32 val;

                val = intel_de_read(dev_priv, reg);
                val &= ~PIPECONF_FRAME_START_DELAY_MASK;
                val |= PIPECONF_FRAME_START_DELAY(0);
                intel_de_write(dev_priv, reg, val);
        }

        if (!crtc_state->has_pch_encoder)
                return;

        if (HAS_PCH_IBX(dev_priv)) {
                /* IBX: PCH-side delay is in the transcoder config register. */
                i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
                u32 val;

                val = intel_de_read(dev_priv, reg);
                val &= ~TRANS_FRAME_START_DELAY_MASK;
                val |= TRANS_FRAME_START_DELAY(0);
                intel_de_write(dev_priv, reg, val);
        } else {
                /* CPT+: PCH-side delay is in the TRANS_CHICKEN2 register. */
                enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
                i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
                u32 val;

                val = intel_de_read(dev_priv, reg);
                val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
                val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
                intel_de_write(dev_priv, reg, val);
        }
}
18267
/*
 * Bring a crtc the BIOS left enabled into a state our driver model can
 * cope with: clear frame start delays, turn off all non-primary planes,
 * kill any BIOS background color, disable the pipe entirely if it has
 * no encoders, and initialize fifo underrun bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
                                struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

        if (crtc_state->hw.active) {
                struct intel_plane *plane;

                /* Clear any frame start delays used for debugging left by the BIOS */
                intel_sanitize_frame_start_delay(crtc_state);

                /* Disable everything but the primary plane */
                for_each_intel_plane_on_crtc(dev, crtc, plane) {
                        const struct intel_plane_state *plane_state =
                                to_intel_plane_state(plane->base.state);

                        if (plane_state->uapi.visible &&
                            plane->base.type != DRM_PLANE_TYPE_PRIMARY)
                                intel_plane_disable_noatomic(crtc, plane);
                }

                /*
                 * Disable any background color set by the BIOS, but enable the
                 * gamma and CSC to match how we program our planes.
                 */
                if (INTEL_GEN(dev_priv) >= 9)
                        intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
                                       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
        }

        /* Adjust the state of the output pipe according to whether we
         * have active connectors/encoders. */
        if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
                intel_crtc_disable_noatomic(crtc, ctx);

        if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
                /*
                 * We start out with underrun reporting disabled to avoid races.
                 * For correct bookkeeping mark this on active crtcs.
                 *
                 * Also on gmch platforms we dont have any hardware bits to
                 * disable the underrun reporting. Which means we need to start
                 * out with underrun reporting disabled also on inactive pipes,
                 * since otherwise we'll complain about the garbage we read when
                 * e.g. coming up after runtime pm.
                 *
                 * No protection against concurrent access is required - at
                 * worst a fifo underrun happens which also sets this to false.
                 */
                crtc->cpu_fifo_underrun_disabled = true;
                /*
                 * We track the PCH trancoder underrun reporting state
                 * within the crtc. With crtc for pipe A housing the underrun
                 * reporting state for PCH transcoder A, crtc for pipe B housing
                 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
                 * and marking underrun reporting as disabled for the non-existing
                 * PCH transcoders B and C would prevent enabling the south
                 * error interrupt (see cpt_can_enable_serr_int()).
                 */
                if (has_pch_trancoder(dev_priv, crtc->pipe))
                        crtc->pch_fifo_underrun_disabled = true;
        }
}
18333
18334 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
18335 {
18336         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
18337
18338         /*
18339          * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
18340          * the hardware when a high res displays plugged in. DPLL P
18341          * divider is zero, and the pipe timings are bonkers. We'll
18342          * try to disable everything in that case.
18343          *
18344          * FIXME would be nice to be able to sanitize this state
18345          * without several WARNs, but for now let's take the easy
18346          * road.
18347          */
18348         return IS_GEN(dev_priv, 6) &&
18349                 crtc_state->hw.active &&
18350                 crtc_state->shared_dpll &&
18351                 crtc_state->port_clock == 0;
18352 }
18353
/*
 * Fix up an encoder whose hw state is inconsistent with its crtc link
 * (e.g. after resume register restore): manually disable encoders that
 * have active connectors but no active pipe, clamp connector state to
 * off, and notify opregion of the final state.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_connector *connector;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_crtc_state *crtc_state = crtc ?
                to_intel_crtc_state(crtc->base.state) : NULL;

        /* We need to check both for a crtc link (meaning that the
         * encoder is active and trying to read from a pipe) and the
         * pipe itself being active. */
        bool has_active_crtc = crtc_state &&
                crtc_state->hw.active;

        if (crtc_state && has_bogus_dpll_config(crtc_state)) {
                /* Treat a known-bogus BIOS config as an inactive pipe. */
                drm_dbg_kms(&dev_priv->drm,
                            "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
                            pipe_name(crtc->pipe));
                has_active_crtc = false;
        }

        connector = intel_encoder_find_connector(encoder);
        if (connector && !has_active_crtc) {
                drm_dbg_kms(&dev_priv->drm,
                            "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
                            encoder->base.base.id,
                            encoder->base.name);

                /* Connector is active, but has no active pipe. This is
                 * fallout from our resume register restoring. Disable
                 * the encoder manually again. */
                if (crtc_state) {
                        struct drm_encoder *best_encoder;

                        drm_dbg_kms(&dev_priv->drm,
                                    "[ENCODER:%d:%s] manually disabled\n",
                                    encoder->base.base.id,
                                    encoder->base.name);

                        /* avoid oopsing in case the hooks consult best_encoder */
                        best_encoder = connector->base.state->best_encoder;
                        connector->base.state->best_encoder = &encoder->base;

                        if (encoder->disable)
                                encoder->disable(encoder, crtc_state,
                                                 connector->base.state);
                        if (encoder->post_disable)
                                encoder->post_disable(encoder, crtc_state,
                                                      connector->base.state);

                        /* Restore what the hooks saw before we faked it. */
                        connector->base.state->best_encoder = best_encoder;
                }
                encoder->base.crtc = NULL;

                /* Inconsistent output/port/pipe state happens presumably due to
                 * a bug in one of the get_hw_state functions. Or someplace else
                 * in our code, like the register restore mess on resume. Clamp
                 * things to off as a safer default. */

                connector->base.dpms = DRM_MODE_DPMS_OFF;
                connector->base.encoder = NULL;
        }

        /* notify opregion of the sanitized encoder state */
        intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_sanitize_encoder_pll_mapping(encoder);
}
18423
/* FIXME read out full plane state for all planes */
/*
 * Read back per-plane hw enable state (visible + which pipe), record it
 * in the plane/crtc states, then fix up each crtc's active_planes mask.
 */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
        struct intel_plane *plane;
        struct intel_crtc *crtc;

        for_each_intel_plane(&dev_priv->drm, plane) {
                struct intel_plane_state *plane_state =
                        to_intel_plane_state(plane->base.state);
                struct intel_crtc_state *crtc_state;
                enum pipe pipe = PIPE_A;
                bool visible;

                /* get_hw_state() reports the pipe the plane is actually on. */
                visible = plane->get_hw_state(plane, &pipe);

                crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
                crtc_state = to_intel_crtc_state(crtc->base.state);

                intel_set_plane_visible(crtc_state, plane_state, visible);

                drm_dbg_kms(&dev_priv->drm,
                            "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
                            plane->base.base.id, plane->base.name,
                            enableddisabled(visible), pipe_name(pipe));
        }

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);

                fixup_active_planes(crtc_state);
        }
}
18457
18458 static void intel_modeset_readout_hw_state(struct drm_device *dev)
18459 {
18460         struct drm_i915_private *dev_priv = to_i915(dev);
18461         struct intel_cdclk_state *cdclk_state =
18462                 to_intel_cdclk_state(dev_priv->cdclk.obj.state);
18463         enum pipe pipe;
18464         struct intel_crtc *crtc;
18465         struct intel_encoder *encoder;
18466         struct intel_connector *connector;
18467         struct drm_connector_list_iter conn_iter;
18468         u8 active_pipes = 0;
18469         int i;
18470
18471         for_each_intel_crtc(dev, crtc) {
18472                 struct intel_crtc_state *crtc_state =
18473                         to_intel_crtc_state(crtc->base.state);
18474
18475                 __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
18476                 intel_crtc_free_hw_state(crtc_state);
18477                 intel_crtc_state_reset(crtc_state, crtc);
18478
18479                 crtc_state->hw.active = crtc_state->hw.enable =
18480                         dev_priv->display.get_pipe_config(crtc, crtc_state);
18481
18482                 crtc->base.enabled = crtc_state->hw.enable;
18483                 crtc->active = crtc_state->hw.active;
18484
18485                 if (crtc_state->hw.active)
18486                         active_pipes |= BIT(crtc->pipe);
18487
18488                 drm_dbg_kms(&dev_priv->drm,
18489                             "[CRTC:%d:%s] hw state readout: %s\n",
18490                             crtc->base.base.id, crtc->base.name,
18491                             enableddisabled(crtc_state->hw.active));
18492         }
18493
18494         dev_priv->active_pipes = cdclk_state->active_pipes = active_pipes;
18495
18496         readout_plane_state(dev_priv);
18497
18498         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
18499                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
18500
18501                 pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
18502                                                         &pll->state.hw_state);
18503
18504                 if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
18505                     pll->info->id == DPLL_ID_EHL_DPLL4) {
18506                         pll->wakeref = intel_display_power_get(dev_priv,
18507                                                                POWER_DOMAIN_DPLL_DC_OFF);
18508                 }
18509
18510                 pll->state.crtc_mask = 0;
18511                 for_each_intel_crtc(dev, crtc) {
18512                         struct intel_crtc_state *crtc_state =
18513                                 to_intel_crtc_state(crtc->base.state);
18514
18515                         if (crtc_state->hw.active &&
18516                             crtc_state->shared_dpll == pll)
18517                                 pll->state.crtc_mask |= 1 << crtc->pipe;
18518                 }
18519                 pll->active_mask = pll->state.crtc_mask;
18520
18521                 drm_dbg_kms(&dev_priv->drm,
18522                             "%s hw state readout: crtc_mask 0x%08x, on %i\n",
18523                             pll->info->name, pll->state.crtc_mask, pll->on);
18524         }
18525
18526         for_each_intel_encoder(dev, encoder) {
18527                 pipe = 0;
18528
18529                 if (encoder->get_hw_state(encoder, &pipe)) {
18530                         struct intel_crtc_state *crtc_state;
18531
18532                         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
18533                         crtc_state = to_intel_crtc_state(crtc->base.state);
18534
18535                         encoder->base.crtc = &crtc->base;
18536                         encoder->get_config(encoder, crtc_state);
18537                 } else {
18538                         encoder->base.crtc = NULL;
18539                 }
18540
18541                 drm_dbg_kms(&dev_priv->drm,
18542                             "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
18543                             encoder->base.base.id, encoder->base.name,
18544                             enableddisabled(encoder->base.crtc),
18545                             pipe_name(pipe));
18546         }
18547
18548         drm_connector_list_iter_begin(dev, &conn_iter);
18549         for_each_intel_connector_iter(connector, &conn_iter) {
18550                 if (connector->get_hw_state(connector)) {
18551                         struct intel_crtc_state *crtc_state;
18552                         struct intel_crtc *crtc;
18553
18554                         connector->base.dpms = DRM_MODE_DPMS_ON;
18555
18556                         encoder = intel_attached_encoder(connector);
18557                         connector->base.encoder = &encoder->base;
18558
18559                         crtc = to_intel_crtc(encoder->base.crtc);
18560                         crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;
18561
18562                         if (crtc_state && crtc_state->hw.active) {
18563                                 /*
18564                                  * This has to be done during hardware readout
18565                                  * because anything calling .crtc_disable may
18566                                  * rely on the connector_mask being accurate.
18567                                  */
18568                                 crtc_state->uapi.connector_mask |=
18569                                         drm_connector_mask(&connector->base);
18570                                 crtc_state->uapi.encoder_mask |=
18571                                         drm_encoder_mask(&encoder->base);
18572                         }
18573                 } else {
18574                         connector->base.dpms = DRM_MODE_DPMS_OFF;
18575                         connector->base.encoder = NULL;
18576                 }
18577                 drm_dbg_kms(&dev_priv->drm,
18578                             "[CONNECTOR:%d:%s] hw state readout: %s\n",
18579                             connector->base.base.id, connector->base.name,
18580                             enableddisabled(connector->base.encoder));
18581         }
18582         drm_connector_list_iter_end(&conn_iter);
18583
18584         for_each_intel_crtc(dev, crtc) {
18585                 struct intel_bw_state *bw_state =
18586                         to_intel_bw_state(dev_priv->bw_obj.state);
18587                 struct intel_crtc_state *crtc_state =
18588                         to_intel_crtc_state(crtc->base.state);
18589                 struct intel_plane *plane;
18590                 int min_cdclk = 0;
18591
18592                 if (crtc_state->hw.active) {
18593                         struct drm_display_mode *mode = &crtc_state->hw.mode;
18594
18595                         intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode,
18596                                                     crtc_state);
18597
18598                         *mode = crtc_state->hw.adjusted_mode;
18599                         mode->hdisplay = crtc_state->pipe_src_w;
18600                         mode->vdisplay = crtc_state->pipe_src_h;
18601
18602                         /*
18603                          * The initial mode needs to be set in order to keep
18604                          * the atomic core happy. It wants a valid mode if the
18605                          * crtc's enabled, so we do the above call.
18606                          *
18607                          * But we don't set all the derived state fully, hence
18608                          * set a flag to indicate that a full recalculation is
18609                          * needed on the next commit.
18610                          */
18611                         mode->private_flags = I915_MODE_FLAG_INHERITED;
18612
18613                         intel_crtc_compute_pixel_rate(crtc_state);
18614
18615                         intel_crtc_update_active_timings(crtc_state);
18616
18617                         intel_crtc_copy_hw_to_uapi_state(crtc_state);
18618                 }
18619
18620                 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
18621                         const struct intel_plane_state *plane_state =
18622                                 to_intel_plane_state(plane->base.state);
18623
18624                         /*
18625                          * FIXME don't have the fb yet, so can't
18626                          * use intel_plane_data_rate() :(
18627                          */
18628                         if (plane_state->uapi.visible)
18629                                 crtc_state->data_rate[plane->id] =
18630                                         4 * crtc_state->pixel_rate;
18631                         /*
18632                          * FIXME don't have the fb yet, so can't
18633                          * use plane->min_cdclk() :(
18634                          */
18635                         if (plane_state->uapi.visible && plane->min_cdclk) {
18636                                 if (crtc_state->double_wide ||
18637                                     INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
18638                                         crtc_state->min_cdclk[plane->id] =
18639                                                 DIV_ROUND_UP(crtc_state->pixel_rate, 2);
18640                                 else
18641                                         crtc_state->min_cdclk[plane->id] =
18642                                                 crtc_state->pixel_rate;
18643                         }
18644                         drm_dbg_kms(&dev_priv->drm,
18645                                     "[PLANE:%d:%s] min_cdclk %d kHz\n",
18646                                     plane->base.base.id, plane->base.name,
18647                                     crtc_state->min_cdclk[plane->id]);
18648                 }
18649
18650                 if (crtc_state->hw.active) {
18651                         min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
18652                         if (WARN_ON(min_cdclk < 0))
18653                                 min_cdclk = 0;
18654                 }
18655
18656                 cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
18657                 cdclk_state->min_voltage_level[crtc->pipe] =
18658                         crtc_state->min_voltage_level;
18659
18660                 intel_bw_crtc_update(bw_state, crtc_state);
18661
18662                 intel_pipe_config_sanity_check(dev_priv, crtc_state);
18663         }
18664 }
18665
18666 static void
18667 get_encoder_power_domains(struct drm_i915_private *dev_priv)
18668 {
18669         struct intel_encoder *encoder;
18670
18671         for_each_intel_encoder(&dev_priv->drm, encoder) {
18672                 struct intel_crtc_state *crtc_state;
18673
18674                 if (!encoder->get_power_domains)
18675                         continue;
18676
18677                 /*
18678                  * MST-primary and inactive encoders don't have a crtc state
18679                  * and neither of these require any power domain references.
18680                  */
18681                 if (!encoder->base.crtc)
18682                         continue;
18683
18684                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
18685                 encoder->get_power_domains(encoder, crtc_state);
18686         }
18687 }
18688
18689 static void intel_early_display_was(struct drm_i915_private *dev_priv)
18690 {
18691         /*
18692          * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
18693          * Also known as Wa_14010480278.
18694          */
18695         if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
18696                 intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
18697                                intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
18698
18699         if (IS_HASWELL(dev_priv)) {
18700                 /*
18701                  * WaRsPkgCStateDisplayPMReq:hsw
18702                  * System hang if this isn't done before disabling all planes!
18703                  */
18704                 intel_de_write(dev_priv, CHICKEN_PAR1_1,
18705                                intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
18706         }
18707 }
18708
18709 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
18710                                        enum port port, i915_reg_t hdmi_reg)
18711 {
18712         u32 val = intel_de_read(dev_priv, hdmi_reg);
18713
18714         if (val & SDVO_ENABLE ||
18715             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
18716                 return;
18717
18718         drm_dbg_kms(&dev_priv->drm,
18719                     "Sanitizing transcoder select for HDMI %c\n",
18720                     port_name(port));
18721
18722         val &= ~SDVO_PIPE_SEL_MASK;
18723         val |= SDVO_PIPE_SEL(PIPE_A);
18724
18725         intel_de_write(dev_priv, hdmi_reg, val);
18726 }
18727
18728 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
18729                                      enum port port, i915_reg_t dp_reg)
18730 {
18731         u32 val = intel_de_read(dev_priv, dp_reg);
18732
18733         if (val & DP_PORT_EN ||
18734             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
18735                 return;
18736
18737         drm_dbg_kms(&dev_priv->drm,
18738                     "Sanitizing transcoder select for DP %c\n",
18739                     port_name(port));
18740
18741         val &= ~DP_PIPE_SEL_MASK;
18742         val |= DP_PIPE_SEL(PIPE_A);
18743
18744         intel_de_write(dev_priv, dp_reg, val);
18745 }
18746
18747 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
18748 {
18749         /*
18750          * The BIOS may select transcoder B on some of the PCH
18751          * ports even it doesn't enable the port. This would trip
18752          * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
18753          * Sanitize the transcoder select bits to prevent that. We
18754          * assume that the BIOS never actually enabled the port,
18755          * because if it did we'd actually have to toggle the port
18756          * on and back off to make the transcoder A select stick
18757          * (see. intel_dp_link_down(), intel_disable_hdmi(),
18758          * intel_disable_sdvo()).
18759          */
18760         ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
18761         ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
18762         ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
18763
18764         /* PCH SDVOB multiplex with HDMIB */
18765         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
18766         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
18767         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
18768 }
18769
/*
 * Scan out the current hw modeset state and sanitize it, so that the
 * software state mirrors what the hardware is actually doing.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;
	int i;

	/* Hold a display power reference for the whole readout/sanitize run. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(encoder));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Turn off any DPLL the BIOS left enabled without any active user. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		drm_dbg_kms(&dev_priv->drm,
			    "%s enabled but not in use, disabling\n",
			    pll->info->name);

		pll->info->funcs->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read out (and where supported, sanitize) watermark hw state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		/*
		 * Readout should not leave any extra crtc power domain
		 * references behind; WARN and drop any that show up.
		 */
		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
18872
18873 void intel_display_resume(struct drm_device *dev)
18874 {
18875         struct drm_i915_private *dev_priv = to_i915(dev);
18876         struct drm_atomic_state *state = dev_priv->modeset_restore_state;
18877         struct drm_modeset_acquire_ctx ctx;
18878         int ret;
18879
18880         dev_priv->modeset_restore_state = NULL;
18881         if (state)
18882                 state->acquire_ctx = &ctx;
18883
18884         drm_modeset_acquire_init(&ctx, 0);
18885
18886         while (1) {
18887                 ret = drm_modeset_lock_all_ctx(dev, &ctx);
18888                 if (ret != -EDEADLK)
18889                         break;
18890
18891                 drm_modeset_backoff(&ctx);
18892         }
18893
18894         if (!ret)
18895                 ret = __intel_display_resume(dev, state, &ctx);
18896
18897         intel_enable_ipc(dev_priv);
18898         drm_modeset_drop_locks(&ctx);
18899         drm_modeset_acquire_fini(&ctx);
18900
18901         if (ret)
18902                 drm_err(&dev_priv->drm,
18903                         "Restoring old state failed with %i\n", ret);
18904         if (state)
18905                 drm_atomic_state_put(state);
18906 }
18907
18908 static void intel_hpd_poll_fini(struct drm_i915_private *i915)
18909 {
18910         struct intel_connector *connector;
18911         struct drm_connector_list_iter conn_iter;
18912
18913         /* Kill all the work that may have been queued by hpd. */
18914         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
18915         for_each_intel_connector_iter(connector, &conn_iter) {
18916                 if (connector->modeset_retry_work.func)
18917                         cancel_work_sync(&connector->modeset_retry_work);
18918                 if (connector->hdcp.shim) {
18919                         cancel_delayed_work_sync(&connector->hdcp.check_work);
18920                         cancel_work_sync(&connector->hdcp.prop_work);
18921                 }
18922         }
18923         drm_connector_list_iter_end(&conn_iter);
18924 }
18925
/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	/*
	 * Wait for any in-flight work on the flip/modeset workqueues to
	 * finish while interrupts are still installed.
	 */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	/* By now the atomic helper's free list must have been drained. */
	flush_work(&i915->atomic_helper.free_work);
	WARN_ON(!llist_empty(&i915->atomic_helper.free_list));
}
18935
/*
 * part #2: call after irq uninstall
 *
 * The teardown steps below are order sensitive; see the individual
 * comments for the known constraints.
 */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* No more work should be queued at this point; drop the queues. */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}
18975
18976 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
18977
/*
 * Snapshot of display registers taken at error-capture time.
 * Filled in by intel_display_capture_error_state() and dumped by
 * intel_display_print_error_state().
 */
struct intel_display_error_state {

	u32 power_well_driver;		/* HSW_PWR_WELL_CTL2 (HSW/BDW only) */

	struct intel_cursor_error_state {
		u32 control;		/* CURCNTR */
		u32 position;		/* CURPOS */
		u32 base;		/* CURBASE */
		u32 size;		/* note: never written during capture */
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;	/* pipe power domain was enabled */
		u32 source;		/* PIPESRC */
		u32 stat;		/* PIPESTAT (GMCH platforms only) */
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;		/* DSPCNTR */
		u32 stride;		/* DSPSTRIDE */
		u32 size;		/* DSPSIZE (gen <= 3 only) */
		u32 pos;		/* DSPPOS (gen <= 3 only) */
		u32 addr;		/* DSPADDR (gen <= 7, not HSW) */
		u32 surface;		/* DSPSURF (gen >= 4 only) */
		u32 tile_offset;	/* DSPTILEOFF (gen >= 4 only) */
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool available;		/* transcoder exists on this platform */
		bool power_domain_on;	/* transcoder power domain was enabled */
		enum transcoder cpu_transcoder;

		u32 conf;		/* PIPECONF */

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];
};
19020
19021 struct intel_display_error_state *
19022 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
19023 {
19024         struct intel_display_error_state *error;
19025         int transcoders[] = {
19026                 TRANSCODER_A,
19027                 TRANSCODER_B,
19028                 TRANSCODER_C,
19029                 TRANSCODER_D,
19030                 TRANSCODER_EDP,
19031         };
19032         int i;
19033
19034         BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
19035
19036         if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
19037                 return NULL;
19038
19039         error = kzalloc(sizeof(*error), GFP_ATOMIC);
19040         if (error == NULL)
19041                 return NULL;
19042
19043         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
19044                 error->power_well_driver = intel_de_read(dev_priv,
19045                                                          HSW_PWR_WELL_CTL2);
19046
19047         for_each_pipe(dev_priv, i) {
19048                 error->pipe[i].power_domain_on =
19049                         __intel_display_power_is_enabled(dev_priv,
19050                                                          POWER_DOMAIN_PIPE(i));
19051                 if (!error->pipe[i].power_domain_on)
19052                         continue;
19053
19054                 error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
19055                 error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
19056                 error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));
19057
19058                 error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
19059                 error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
19060                 if (INTEL_GEN(dev_priv) <= 3) {
19061                         error->plane[i].size = intel_de_read(dev_priv,
19062                                                              DSPSIZE(i));
19063                         error->plane[i].pos = intel_de_read(dev_priv,
19064                                                             DSPPOS(i));
19065                 }
19066                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
19067                         error->plane[i].addr = intel_de_read(dev_priv,
19068                                                              DSPADDR(i));
19069                 if (INTEL_GEN(dev_priv) >= 4) {
19070                         error->plane[i].surface = intel_de_read(dev_priv,
19071                                                                 DSPSURF(i));
19072                         error->plane[i].tile_offset = intel_de_read(dev_priv,
19073                                                                     DSPTILEOFF(i));
19074                 }
19075
19076                 error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));
19077
19078                 if (HAS_GMCH(dev_priv))
19079                         error->pipe[i].stat = intel_de_read(dev_priv,
19080                                                             PIPESTAT(i));
19081         }
19082
19083         for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
19084                 enum transcoder cpu_transcoder = transcoders[i];
19085
19086                 if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
19087                         continue;
19088
19089                 error->transcoder[i].available = true;
19090                 error->transcoder[i].power_domain_on =
19091                         __intel_display_power_is_enabled(dev_priv,
19092                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
19093                 if (!error->transcoder[i].power_domain_on)
19094                         continue;
19095
19096                 error->transcoder[i].cpu_transcoder = cpu_transcoder;
19097
19098                 error->transcoder[i].conf = intel_de_read(dev_priv,
19099                                                           PIPECONF(cpu_transcoder));
19100                 error->transcoder[i].htotal = intel_de_read(dev_priv,
19101                                                             HTOTAL(cpu_transcoder));
19102                 error->transcoder[i].hblank = intel_de_read(dev_priv,
19103                                                             HBLANK(cpu_transcoder));
19104                 error->transcoder[i].hsync = intel_de_read(dev_priv,
19105                                                            HSYNC(cpu_transcoder));
19106                 error->transcoder[i].vtotal = intel_de_read(dev_priv,
19107                                                             VTOTAL(cpu_transcoder));
19108                 error->transcoder[i].vblank = intel_de_read(dev_priv,
19109                                                             VBLANK(cpu_transcoder));
19110                 error->transcoder[i].vsync = intel_de_read(dev_priv,
19111                                                            VSYNC(cpu_transcoder));
19112         }
19113
19114         return error;
19115 }
19116
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

/*
 * Dump a previously captured display error state into the error state
 * buffer @m. A NULL @error (nothing was captured) is silently ignored.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		/* Same per-gen register availability as the capture side. */
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	/* Only dump transcoders that exist and were captured. */
	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		if (!error->transcoder[i].available)
			continue;

		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
19177
19178 #endif