Merge drm/drm-next into drm-intel-next-queued
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / display / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/dma-resv.h>
33 #include <linux/slab.h>
34
35 #include <drm/drm_atomic.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_atomic_uapi.h>
38 #include <drm/drm_dp_helper.h>
39 #include <drm/drm_edid.h>
40 #include <drm/drm_fourcc.h>
41 #include <drm/drm_plane_helper.h>
42 #include <drm/drm_probe_helper.h>
43 #include <drm/drm_rect.h>
44
45 #include "display/intel_crt.h"
46 #include "display/intel_ddi.h"
47 #include "display/intel_dp.h"
48 #include "display/intel_dp_mst.h"
49 #include "display/intel_dsi.h"
50 #include "display/intel_dvo.h"
51 #include "display/intel_gmbus.h"
52 #include "display/intel_hdmi.h"
53 #include "display/intel_lvds.h"
54 #include "display/intel_sdvo.h"
55 #include "display/intel_tv.h"
56 #include "display/intel_vdsc.h"
57
58 #include "gt/intel_rps.h"
59
60 #include "i915_drv.h"
61 #include "i915_trace.h"
62 #include "intel_acpi.h"
63 #include "intel_atomic.h"
64 #include "intel_atomic_plane.h"
65 #include "intel_bw.h"
66 #include "intel_cdclk.h"
67 #include "intel_color.h"
68 #include "intel_display_types.h"
69 #include "intel_dp_link_training.h"
70 #include "intel_fbc.h"
71 #include "intel_fbdev.h"
72 #include "intel_fifo_underrun.h"
73 #include "intel_frontbuffer.h"
74 #include "intel_hdcp.h"
75 #include "intel_hotplug.h"
76 #include "intel_overlay.h"
77 #include "intel_pipe_crc.h"
78 #include "intel_pm.h"
79 #include "intel_psr.h"
80 #include "intel_quirks.h"
81 #include "intel_sideband.h"
82 #include "intel_sprite.h"
83 #include "intel_tc.h"
84 #include "intel_vga.h"
85
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for ivb (no fp16 due to hw issue) */
static const u32 ivb_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Primary plane formats for gen >= 4, except ivb */
static const u32 i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

/* Primary plane formats for vlv/chv (adds alpha variants) */
static const u32 vlv_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR16161616F,
};

/* Buffer layout modifiers supported with the formats above */
static const u64 i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID	/* sentinel terminating the list */
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

/* Cursor planes take linear (untiled) buffers only */
static const u64 cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID	/* sentinel terminating the list */
};
145
146 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
147                                 struct intel_crtc_state *pipe_config);
148 static void ilk_pch_clock_get(struct intel_crtc *crtc,
149                               struct intel_crtc_state *pipe_config);
150
151 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
152                                   struct drm_i915_gem_object *obj,
153                                   struct drm_mode_fb_cmd2 *mode_cmd);
154 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
155 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
156 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
157                                          const struct intel_link_m_n *m_n,
158                                          const struct intel_link_m_n *m2_n2);
159 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
160 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
161 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
162 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
163 static void vlv_prepare_pll(struct intel_crtc *crtc,
164                             const struct intel_crtc_state *pipe_config);
165 static void chv_prepare_pll(struct intel_crtc *crtc,
166                             const struct intel_crtc_state *pipe_config);
167 static void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
168 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
169 static void intel_modeset_setup_hw_state(struct drm_device *dev,
170                                          struct drm_modeset_acquire_ctx *ctx);
171 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);
172
/*
 * Per-platform min/max constraints for each DPLL divider and for the
 * resulting dot/VCO clock rates (kHz).  Instances below are selected
 * per display output type and refclk.
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		/* dot clock at or above which p2_fast is used (see i9xx_select_p2_div) */
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
183
184 /* returns HPLL frequency in kHz */
185 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
186 {
187         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
188
189         /* Obtain SKU information */
190         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
191                 CCK_FUSE_HPLL_FREQ_MASK;
192
193         return vco_freq[hpll_freq] * 1000;
194 }
195
196 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
197                       const char *name, u32 reg, int ref_freq)
198 {
199         u32 val;
200         int divider;
201
202         val = vlv_cck_read(dev_priv, reg);
203         divider = val & CCK_FREQUENCY_VALUES;
204
205         drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
206                  (divider << CCK_FREQUENCY_STATUS_SHIFT),
207                  "%s change in progress\n", name);
208
209         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
210 }
211
212 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
213                            const char *name, u32 reg)
214 {
215         int hpll;
216
217         vlv_cck_get(dev_priv);
218
219         if (dev_priv->hpll_freq == 0)
220                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
221
222         hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
223
224         vlv_cck_put(dev_priv);
225
226         return hpll;
227 }
228
229 static void intel_update_czclk(struct drm_i915_private *dev_priv)
230 {
231         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
232                 return;
233
234         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
235                                                       CCK_CZ_CLOCK_CONTROL);
236
237         drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
238                 dev_priv->czclk_freq);
239 }
240
241 /* units of 100MHz */
242 static u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
243                                const struct intel_crtc_state *pipe_config)
244 {
245         if (HAS_DDI(dev_priv))
246                 return pipe_config->port_clock; /* SPLL */
247         else
248                 return dev_priv->fdi_pll_freq;
249 }
250
/* gen2 DPLL limits, analog (DAC/CRT) output */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* gen2 DPLL limits, DVO output */
static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

/* gen2 DPLL limits, LVDS output */
static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

/* i9xx DPLL limits, SDVO/other digital outputs */
static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* i9xx DPLL limits, LVDS output */
static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};


/* g4x DPLL limits, SDVO output */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

/* g4x DPLL limits, HDMI/DP output */
static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* g4x DPLL limits, single-channel LVDS (p2 fixed: dot_limit 0) */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

/* g4x DPLL limits, dual-channel LVDS (p2 fixed: dot_limit 0) */
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

/* Pineview DPLL limits, SDVO output */
static const struct intel_limit pnv_limits_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Pineview DPLL limits, LVDS output */
static const struct intel_limit pnv_limits_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
400
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit ilk_limits_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* ilk/snb DPLL limits, single-channel LVDS */
static const struct intel_limit ilk_limits_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* ilk/snb DPLL limits, dual-channel LVDS */
static const struct intel_limit ilk_limits_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit ilk_limits_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* ilk/snb DPLL limits, dual-channel LVDS with 100MHz refclk */
static const struct intel_limit ilk_limits_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
471
static const struct intel_limit intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 values are in 22-bit fixed point (see chv_calc_dpll_params) */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
515
516 /* WA Display #0827: Gen9:all */
517 static void
518 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
519 {
520         if (enable)
521                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
522                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
523         else
524                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
525                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
526 }
527
528 /* Wa_2006604312:icl,ehl */
529 static void
530 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
531                        bool enable)
532 {
533         if (enable)
534                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
535                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
536         else
537                 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
538                                intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
539 }
540
/* Whether the new atomic state requires a full modeset on this CRTC. */
static bool
needs_modeset(const struct intel_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(&state->uapi);
}
546
/* A port-sync slave has a valid master transcoder assigned. */
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}
552
/* A port-sync master has at least one slave transcoder in its mask. */
static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}
558
/* True if the CRTC participates in transcoder port sync, as master or slave. */
bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}
565
566 /*
567  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
568  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
569  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
570  * The helpers' return value is the rate of the clock that is fed to the
571  * display engine's pipe which can be the above fast dot clock rate or a
572  * divided-down version of it.
573  */
574 /* m1 is reserved as 0 in Pineview, n is a ring counter */
575 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
576 {
577         clock->m = clock->m2 + 2;
578         clock->p = clock->p1 * clock->p2;
579         if (WARN_ON(clock->n == 0 || clock->p == 0))
580                 return 0;
581         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
582         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
583
584         return clock->dot;
585 }
586
587 static u32 i9xx_dpll_compute_m(struct dpll *dpll)
588 {
589         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
590 }
591
592 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
593 {
594         clock->m = i9xx_dpll_compute_m(clock);
595         clock->p = clock->p1 * clock->p2;
596         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
597                 return 0;
598         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
599         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
600
601         return clock->dot;
602 }
603
604 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
605 {
606         clock->m = clock->m1 * clock->m2;
607         clock->p = clock->p1 * clock->p2;
608         if (WARN_ON(clock->n == 0 || clock->p == 0))
609                 return 0;
610         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
611         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
612
613         return clock->dot / 5;
614 }
615
/*
 * Fill in m/p/vco/dot for a chv PLL; like vlv the pipe clock is the
 * fast dot clock divided by 5.  m2 (and hence m) carries 22 fractional
 * bits (see intel_limits_chv), so the VCO math is done in 64 bits with
 * the n divider scaled by 1 << 22 to cancel them out.
 */
int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	/* 64-bit multiply to avoid overflowing refclk * m */
	clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
628
/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_pll_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	/* Every individual divider must be within its platform range. */
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		return false;
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		return false;
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		return false;
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		return false;

	/* m1 must exceed m2, except on pnv/vlv/chv/bxt */
	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			return false;

	/* vlv/chv/bxt don't constrain the combined m and p dividers */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			return false;
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			return false;
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		return false;
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		return false;

	return true;
}
669
670 static int
671 i9xx_select_p2_div(const struct intel_limit *limit,
672                    const struct intel_crtc_state *crtc_state,
673                    int target)
674 {
675         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
676
677         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
678                 /*
679                  * For LVDS just rely on its current settings for dual-channel.
680                  * We haven't figured out how to reliably set up different
681                  * single/dual channel state, if we even can.
682                  */
683                 if (intel_is_dual_link_lvds(dev_priv))
684                         return limit->p2.p2_fast;
685                 else
686                         return limit->p2.p2_slow;
687         } else {
688                 if (target < limit->p2.dot_limit)
689                         return limit->p2.p2_slow;
690                 else
691                         return limit->p2.p2_fast;
692         }
693 }
694
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;	/* smallest dot clock deviation found so far */

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* Exhaustively walk the m1/m2/n/p1 divider space. */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 must exceed m2; larger m2 values can't qualify */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					/* honor the LVDS downclock P constraint */
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					/* keep the candidate closest to target */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true if at least one candidate improved on the initial error */
	return (err != target);
}
752
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int err = target;	/* smallest dot clock deviation found so far */

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/*
	 * Exhaustively walk the m1/m2/n/p1 divider space.  Unlike i9xx,
	 * there is no m1 > m2 requirement here (m1 is reserved on pnv).
	 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					/* honor the LVDS downclock P constraint */
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					/* keep the candidate closest to target */
					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* true if at least one candidate improved on the initial error */
	return (err != target);
}
808
809 /*
810  * Returns a set of divisors for the desired target clock with the given
811  * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
813  *
814  * Target and reference clocks are specified in kHz.
815  *
816  * If match_clock is provided, then best_clock P divider must match the P
817  * divider from @match_clock used for LVDS downclocking.
818  */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->uapi.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	/* NOTE: unlike the i9xx/pnv variants, @match_clock is not used here. */
	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/*
						 * Prune the search: never try
						 * a larger n than the best
						 * candidate found so far.
						 */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
867
868 /*
869  * Check if the calculated PLL configuration is more optimal compared to the
870  * best configuration and error found so far. Return the calculated error.
871  */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	/* Guard the division below against a zero target frequency. */
	if (drm_WARN_ON_ONCE(dev, !target_freq))
		return false;

	/* Deviation of the calculated dot clock from the target, in ppm. */
	*error_ppm = div_u64(1000000ULL *
				abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	/* Otherwise require a meaningful (>10 ppm) improvement in error. */
	return *error_ppm + 10 < best_error_ppm;
}
907
908 /*
909  * Returns a set of divisors for the desired target clock with the given
910  * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
912  */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	/* NOTE: @match_clock is not used by this variant. */
	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			/* p2 steps by 2 above 10, by 1 at or below it */
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* solve m2 directly for the target fast clock */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_pll_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
967
968 /*
969  * Returns a set of divisors for the desired target clock with the given
970  * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
972  */
973 static bool
974 chv_find_best_dpll(const struct intel_limit *limit,
975                    struct intel_crtc_state *crtc_state,
976                    int target, int refclk, struct dpll *match_clock,
977                    struct dpll *best_clock)
978 {
979         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
980         struct drm_device *dev = crtc->base.dev;
981         unsigned int best_error_ppm;
982         struct dpll clock;
983         u64 m2;
984         int found = false;
985
986         memset(best_clock, 0, sizeof(*best_clock));
987         best_error_ppm = 1000000;
988
989         /*
990          * Based on hardware doc, the n always set to 1, and m1 always
991          * set to 2.  If requires to support 200Mhz refclk, we need to
992          * revisit this because n may not 1 anymore.
993          */
994         clock.n = 1, clock.m1 = 2;
995         target *= 5;    /* fast clock */
996
997         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
998                 for (clock.p2 = limit->p2.p2_fast;
999                                 clock.p2 >= limit->p2.p2_slow;
1000                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
1001                         unsigned int error_ppm;
1002
1003                         clock.p = clock.p1 * clock.p2;
1004
1005                         m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
1006                                                    refclk * clock.m1);
1007
1008                         if (m2 > INT_MAX/clock.m1)
1009                                 continue;
1010
1011                         clock.m2 = m2;
1012
1013                         chv_calc_dpll_params(refclk, &clock);
1014
1015                         if (!intel_pll_is_valid(to_i915(dev), limit, &clock))
1016                                 continue;
1017
1018                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1019                                                 best_error_ppm, &error_ppm))
1020                                 continue;
1021
1022                         *best_clock = clock;
1023                         best_error_ppm = error_ppm;
1024                         found = true;
1025                 }
1026         }
1027
1028         return found;
1029 }
1030
1031 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
1032                         struct dpll *best_clock)
1033 {
1034         int refclk = 100000;
1035         const struct intel_limit *limit = &intel_limits_bxt;
1036
1037         return chv_find_best_dpll(limit, crtc_state,
1038                                   crtc_state->port_clock, refclk,
1039                                   NULL, best_clock);
1040 }
1041
/*
 * Sample the pipe's current scanline (PIPEDSL) twice, 5 ms apart; the pipe
 * is considered running if the two readings differ.
 */
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	/* gen2 has a narrower scanline field */
	if (IS_GEN(dev_priv, 2))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = intel_de_read(dev_priv, reg) & line_mask;
	msleep(5);
	line2 = intel_de_read(dev_priv, reg) & line_mask;

	return line1 != line2;
}
1060
/*
 * Poll until the pipe's scanline motion matches @state (true = moving,
 * false = stopped), logging an error after a 100 ms timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), onoff(state));
}
1072
/* Wait for the pipe's scanline to stop advancing (pipe fully off). */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
1077
/* Wait for the pipe's scanline to start advancing (pipe running). */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
1082
/*
 * Wait until the pipe has actually shut down after being disabled.
 * Gen4+ exposes an explicit "pipe active" status bit in PIPECONF;
 * older hardware is checked indirectly via scanline movement.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			drm_WARN(&dev_priv->drm, 1,
				 "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
1102
/* Only for pre-ILK configs */
/* Warn if the DPLL's VCO-enable state does not match the expected @state. */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1116
/* XXX: the dsi pll is shared between MIPI DSI ports */
/* Warn if the DSI PLL enable state (read via the CCK sideband) != @state. */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1132
/* Warn if the FDI TX enable state on @pipe does not match @state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		u32 val = intel_de_read(dev_priv,
					TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1159
/* Warn if the FDI RX enable state on @pipe does not match @state. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1174
/*
 * Warn if the FDI TX PLL is not enabled on @pipe. No-op on platforms
 * where the check does not apply (ILK: always on; HSW+: DDI-managed).
 */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1191
/* Warn if the FDI RX PLL enable state on @pipe does not match @state. */
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1204
/*
 * Warn if the panel power sequencer registers driving @pipe are locked
 * (and the panel is powered on). First resolve which pipe the panel is
 * attached to from the platform-specific port-select/PPS layout, then
 * check the unlock bits in the matching PP_CONTROL register.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	/* DDI platforms are not handled by this helper */
	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* Map the PPS port select to the port's current pipe. */
		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* Pre-PCH-split hardware here only supports an LVDS panel. */
		drm_WARN_ON(&dev_priv->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* "Unlocked" means panel off, or the unlock key present in the regs. */
	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1262
/*
 * Warn if the transcoder's enable state does not match @state.
 * If the transcoder's power well is down, the pipe is treated as
 * disabled without touching the (unpowered) register.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}
1290
/* Warn if @plane's hardware enable state does not match @state. */
static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)
1305
/* Warn if any plane attached to @crtc is still enabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
1314
/*
 * Warn if vblank interrupts are still enabled on @crtc; a successful
 * vblank_get (return 0) means they were, so drop the reference again.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1320
/* Warn if the PCH transcoder for @pipe is still enabled. */
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}
1333
/*
 * Warn if the PCH DP port is enabled on @pipe, or (IBX workaround) if a
 * disabled port is still parked on transcoder B.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}
1351
/*
 * Warn if the PCH HDMI port is enabled on @pipe, or (IBX workaround) if a
 * disabled port is still parked on transcoder B.
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}
1369
/* Warn if any PCH output port (DP, VGA, LVDS, HDMI) is enabled on @pipe. */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1394
/*
 * Write the precomputed DPLL value and wait for the PLL to lock.
 * Callers are expected to have done the pipe/panel preconditions
 * (see vlv_enable_pll()).
 */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	intel_de_posting_read(dev_priv, DPLL(pipe));
	udelay(150);

	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
}
1408
/*
 * Enable the VLV DPLL for @crtc from the precomputed dpll_hw_state.
 * The PLL itself is only spun up when the state asks for VCO enable;
 * DPLL_MD is programmed unconditionally.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	intel_de_write(dev_priv, DPLL_MD(pipe),
		       pipe_config->dpll_hw_state.dpll_md);
	intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}
1427
1428
/*
 * Enable the CHV DPLL: first enable the 10-bit clock via the DPIO
 * sideband, then write the DPLL register and wait for lock.
 * Callers handle the pipe/panel preconditions (see chv_enable_pll()).
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
}
1458
/*
 * Enable the CHV DPLL for @crtc from the precomputed dpll_hw_state.
 * Pipes B/C need the WaPixelRepeatModeFixForC0 chicken-bit dance to get
 * DPLL_MD programmed, since their own DPLLCMD register is non-functional.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		intel_de_write(dev_priv, DPLL_MD(PIPE_B),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_write(dev_priv, CBR4_VLV, 0);
		/* cache the value; the register can't be read back reliably */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		drm_WARN_ON(&dev_priv->drm,
			    (intel_de_read(dev_priv, DPLL(PIPE_B)) &
			     DPLL_VGA_MODE_DIS) == 0);
	} else {
		intel_de_write(dev_priv, DPLL_MD(pipe),
			       pipe_config->dpll_hw_state.dpll_md);
		intel_de_posting_read(dev_priv, DPLL_MD(pipe));
	}
}
1499
1500 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1501 {
1502         if (IS_I830(dev_priv))
1503                 return false;
1504
1505         return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1506 }
1507
/*
 * Enable the i9xx-style DPLL for @crtc, following the hardware-mandated
 * write sequence: VGA-mode write first, then the real value, stabilize,
 * program the pixel multiplier, and finish with three warmup rewrites.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS);
	intel_de_write(dev_priv, reg, dpll);

	/* Wait for the clocks to stabilize. */
	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		/* gen4+ has a dedicated register for the pixel multiplier */
		intel_de_write(dev_priv, DPLL_MD(crtc->pipe),
			       crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		intel_de_write(dev_priv, reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		intel_de_write(dev_priv, reg, dpll);
		intel_de_posting_read(dev_priv, reg);
		udelay(150); /* wait for warmup */
	}
}
1553
/*
 * Disable the i9xx-style DPLL, leaving only VGA-mode-disable set.
 * Skipped entirely on 830, which keeps its PLLs running.
 */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);

	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
1570
/*
 * Disable the VLV pipe PLL for @pipe while keeping the reference clock
 * running; pipes other than A additionally keep the integrated CRI
 * clock enabled.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	intel_de_write(dev_priv, DPLL(pipe), val);
	intel_de_posting_read(dev_priv, DPLL(pipe));
}
1586
/*
 * Disable the CHV pipe PLL for @pipe, keeping the SSC reference clock
 * (and, for pipes other than A, the CRI clock) running, then shut off
 * the PHY's 10-bit clock feed to the display controller via DPIO.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, (enum transcoder)pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	intel_de_write(dev_priv, DPLL(pipe), val);
	intel_de_posting_read(dev_priv, DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}
1612
/*
 * Wait (up to 1 ms) for a VLV/CHV port to report ready in its status
 * register. Port B/C readiness is reported in DPLL(0) — port C's bits
 * sit 4 above port B's, hence the expected_mask shift — while port D
 * reports via DPIO_PHY_STATUS. Warns on timeout rather than returning
 * an error.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dport->base.base.base.id, dport->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
1646
/*
 * Enable the PCH transcoder for @crtc_state's pipe on ILK-style PCHs
 * (IBX/CPT), mirroring the CPU pipe's interlace and BPC configuration,
 * then wait for the transcoder to report enabled. The shared DPLL and
 * both FDI directions must already be running.
 */
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
		intel_de_write(dev_priv, reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(0);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	/* Copy the CPU pipe's interlace mode; IBX+SDVO needs the legacy bit */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
			pipe_name(pipe));
}
1713
/*
 * Enable the single LPT PCH transcoder (always fed by pipe A's FDI RX),
 * copying the interlace mode from @cpu_transcoder's PIPECONF, and wait
 * for it to report enabled.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}
1745
/*
 * Disable the PCH transcoder for @pipe, wait for it to report off, and
 * on CPT clear the timing-override workaround bit set at enable time.
 * FDI and the PCH ports must already be off.
 */
static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
			pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		intel_de_write(dev_priv, reg, val);
	}
}
1776
/*
 * Disable the single LPT PCH transcoder, wait for it to report off,
 * and clear the timing-override workaround bit set at enable time.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}
1794
1795 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1796 {
1797         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1798
1799         if (HAS_PCH_LPT(dev_priv))
1800                 return PIPE_A;
1801         else
1802                 return crtc->pipe;
1803 }
1804
1805 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1806 {
1807         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1808
1809         /*
1810          * On i965gm the hardware frame counter reads
1811          * zero when the TV encoder is enabled :(
1812          */
1813         if (IS_I965GM(dev_priv) &&
1814             (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1815                 return 0;
1816
1817         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1818                 return 0xffffffff; /* full 32 bit counter */
1819         else if (INTEL_GEN(dev_priv) >= 3)
1820                 return 0xffffff; /* only 24 bits of frame count */
1821         else
1822                 return 0; /* Gen2 doesn't have a hardware frame counter */
1823 }
1824
1825 void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1826 {
1827         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1828
1829         assert_vblank_disabled(&crtc->base);
1830         drm_crtc_set_max_vblank_count(&crtc->base,
1831                                       intel_crtc_max_vblank_count(crtc_state));
1832         drm_crtc_vblank_on(&crtc->base);
1833 }
1834
1835 void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
1836 {
1837         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1838
1839         drm_crtc_vblank_off(&crtc->base);
1840         assert_vblank_disabled(&crtc->base);
1841 }
1842
/*
 * Enable @new_crtc_state's pipe after asserting its clock/FDI
 * prerequisites. On platforms whose hardware frame counter is unusable
 * (max count 0) this also waits for the scanline to start moving, so
 * vblank timestamps are sane before drm_crtc_vblank_on() runs.
 */
void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1900
/*
 * Disable @old_crtc_state's pipe. All planes must already be off. On
 * 830 the pipe is left enabled (only double-wide is cleared); on every
 * other platform we clear PIPECONF_ENABLE and wait for the pipe to
 * actually stop.
 */
void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	trace_intel_pipe_disable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	intel_de_write(dev_priv, reg, val);
	/* only wait for pipe-off when we actually cleared the enable bit */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1940
/* GTT tile size in bytes: 2KiB on gen2, 4KiB on everything newer. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
1945
1946 static bool is_ccs_plane(const struct drm_framebuffer *fb, int plane)
1947 {
1948         if (!is_ccs_modifier(fb->modifier))
1949                 return false;
1950
1951         return plane >= fb->format->num_planes / 2;
1952 }
1953
1954 static bool is_gen12_ccs_modifier(u64 modifier)
1955 {
1956         return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
1957                modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
1958
1959 }
1960
1961 static bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane)
1962 {
1963         return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane);
1964 }
1965
1966 static bool is_aux_plane(const struct drm_framebuffer *fb, int plane)
1967 {
1968         if (is_ccs_modifier(fb->modifier))
1969                 return is_ccs_plane(fb, plane);
1970
1971         return plane == 1;
1972 }
1973
1974 static int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane)
1975 {
1976         drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
1977                     (main_plane && main_plane >= fb->format->num_planes / 2));
1978
1979         return fb->format->num_planes / 2 + main_plane;
1980 }
1981
1982 static int ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
1983 {
1984         drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) ||
1985                     ccs_plane < fb->format->num_planes / 2);
1986
1987         return ccs_plane - fb->format->num_planes / 2;
1988 }
1989
1990 /* Return either the main plane's CCS or - if not a CCS FB - UV plane */
1991 int intel_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
1992 {
1993         if (is_ccs_modifier(fb->modifier))
1994                 return main_to_ccs_plane(fb, main_plane);
1995
1996         return 1;
1997 }
1998
1999 bool
2000 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
2001                                     uint64_t modifier)
2002 {
2003         return info->is_yuv &&
2004                info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
2005 }
2006
2007 static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb,
2008                                    int color_plane)
2009 {
2010         return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
2011                color_plane == 1;
2012 }
2013
/*
 * Return the tile width in bytes for @color_plane of @fb, determined
 * by the fb modifier, the hardware generation, and (for Yf tiling)
 * the bytes-per-pixel. CCS planes have their own tile widths: 128
 * bytes pre-gen12, 64 bytes on gen12.
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		/* linear "tiles" are one full GTT page wide */
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 64;
		/* fall through */
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width depends on the pixel size */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}
2066
2067 static unsigned int
2068 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
2069 {
2070         if (is_gen12_ccs_plane(fb, color_plane))
2071                 return 1;
2072
2073         return intel_tile_size(to_i915(fb->dev)) /
2074                 intel_tile_width_bytes(fb, color_plane);
2075 }
2076
2077 /* Return the tile dimensions in pixel units */
2078 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
2079                             unsigned int *tile_width,
2080                             unsigned int *tile_height)
2081 {
2082         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
2083         unsigned int cpp = fb->format->cpp[color_plane];
2084
2085         *tile_width = tile_width_bytes / cpp;
2086         *tile_height = intel_tile_height(fb, color_plane);
2087 }
2088
2089 static unsigned int intel_tile_row_size(const struct drm_framebuffer *fb,
2090                                         int color_plane)
2091 {
2092         unsigned int tile_width, tile_height;
2093
2094         intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
2095
2096         return fb->pitches[color_plane] * tile_height;
2097 }
2098
/* Round @height up to a whole number of tiles for @color_plane of @fb. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
2107
2108 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2109 {
2110         unsigned int size = 0;
2111         int i;
2112
2113         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2114                 size += rot_info->plane[i].width * rot_info->plane[i].height;
2115
2116         return size;
2117 }
2118
2119 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
2120 {
2121         unsigned int size = 0;
2122         int i;
2123
2124         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
2125                 size += rem_info->plane[i].width * rem_info->plane[i].height;
2126
2127         return size;
2128 }
2129
2130 static void
2131 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2132                         const struct drm_framebuffer *fb,
2133                         unsigned int rotation)
2134 {
2135         view->type = I915_GGTT_VIEW_NORMAL;
2136         if (drm_rotation_90_or_270(rotation)) {
2137                 view->type = I915_GGTT_VIEW_ROTATED;
2138                 view->rotated = to_intel_framebuffer(fb)->rot_info;
2139         }
2140 }
2141
/* Required GGTT alignment (bytes) for cursor surfaces per platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
2153
/*
 * Required GGTT alignment (bytes) for linear scanout surfaces per
 * platform; 0 means no alignment constraint.
 */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
2166
/*
 * Return the required GGTT alignment in bytes for @color_plane of
 * @fb. Pre-gen12 AUX planes and CCS planes need only 4K; gen12
 * semiplanar UV planes must be aligned to a full tile row; the rest
 * depends on the modifier and platform.
 */
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
					 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if ((INTEL_GEN(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
	    is_ccs_plane(fb, color_plane))
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_GEN(dev_priv) >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		if (is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		/* Fall-through */
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
		return 16 * 1024;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
		if (INTEL_GEN(dev_priv) >= 12 &&
		    is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		/* Fall-through */
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}
2204
2205 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2206 {
2207         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2208         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2209
2210         return INTEL_GEN(dev_priv) < 4 ||
2211                 (plane->has_fbc &&
2212                  plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2213 }
2214
/*
 * Pin @fb's backing object into the GGTT for scanout (holding a
 * runtime PM wakeref for the duration) and, if @uses_fence and the
 * vma is map-and-fenceable, install a fence register. Returns the
 * pinned vma with an extra reference, or an ERR_PTR on failure;
 * *out_flags gets PLANE_HAS_FENCE when a fence was installed.
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	/* alignment of the main surface (plane 0) governs the pin */
	alignment = intel_surf_alignment(fb, 0);
	if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
		return ERR_PTR(-EINVAL);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/*
	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	pinctl = 0;
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/*
		 * Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* pre-gen4 cannot scan out without a fence: hard failure */
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* hand the caller its own reference on the vma */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}
2309
/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence if one was
 * installed (PLANE_HAS_FENCE in @flags), unpin from the display
 * plane, and drop the vma reference taken at pin time. The object
 * lock is held across the fence release and unpin.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	i915_gem_object_lock(vma->obj);
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_gem_object_unlock(vma->obj);

	i915_vma_put(vma);
}
2320
2321 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2322                           unsigned int rotation)
2323 {
2324         if (drm_rotation_90_or_270(rotation))
2325                 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2326         else
2327                 return fb->pitches[color_plane];
2328 }
2329
2330 /*
2331  * Convert the x/y offsets into a linear offset.
2332  * Only valid with 0/180 degree rotation, which is fine since linear
2333  * offset is only used with linear buffers on pre-hsw and tiled buffers
2334  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2335  */
2336 u32 intel_fb_xy_to_linear(int x, int y,
2337                           const struct intel_plane_state *state,
2338                           int color_plane)
2339 {
2340         const struct drm_framebuffer *fb = state->hw.fb;
2341         unsigned int cpp = fb->format->cpp[color_plane];
2342         unsigned int pitch = state->color_plane[color_plane].stride;
2343
2344         return y * pitch + x * cpp;
2345 }
2346
2347 /*
2348  * Add the x/y offsets derived from fb->offsets[] to the user
2349  * specified plane src x/y offsets. The resulting x/y offsets
2350  * specify the start of scanout from the beginning of the gtt mapping.
2351  */
2352 void intel_add_fb_offsets(int *x, int *y,
2353                           const struct intel_plane_state *state,
2354                           int color_plane)
2355
2356 {
2357         *x += state->color_plane[color_plane].x;
2358         *y += state->color_plane[color_plane].y;
2359 }
2360
2361 static u32 intel_adjust_tile_offset(int *x, int *y,
2362                                     unsigned int tile_width,
2363                                     unsigned int tile_height,
2364                                     unsigned int tile_size,
2365                                     unsigned int pitch_tiles,
2366                                     u32 old_offset,
2367                                     u32 new_offset)
2368 {
2369         unsigned int pitch_pixels = pitch_tiles * tile_width;
2370         unsigned int tiles;
2371
2372         WARN_ON(old_offset & (tile_size - 1));
2373         WARN_ON(new_offset & (tile_size - 1));
2374         WARN_ON(new_offset > old_offset);
2375
2376         tiles = (old_offset - new_offset) / tile_size;
2377
2378         *y += tiles / pitch_tiles * tile_height;
2379         *x += tiles % pitch_tiles * tile_width;
2380
2381         /* minimize x in case it got needlessly big */
2382         *y += *x / pitch_pixels * tile_height;
2383         *x %= pitch_pixels;
2384
2385         return new_offset;
2386 }
2387
2388 static bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane)
2389 {
2390         return fb->modifier == DRM_FORMAT_MOD_LINEAR ||
2391                is_gen12_ccs_plane(fb, color_plane);
2392 }
2393
/*
 * Move the difference between @old_offset and @new_offset (surface
 * byte offsets; tile size aligned for tiled surfaces) into the
 * *@x/*@y pixel offsets. Returns @new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	/* Offsets may only be lowered; x/y absorb the (positive) delta. */
	drm_WARN_ON(&dev_priv->drm, new_offset > old_offset);

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* Rotated pitch is specified in tile_height units. */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		/* Linear: fold the old x/y into bytes, then re-split. */
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
2432
2433 /*
2434  * Adjust the tile offset by moving the difference into
2435  * the x/y offsets.
2436  */
2437 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2438                                              const struct intel_plane_state *state,
2439                                              int color_plane,
2440                                              u32 old_offset, u32 new_offset)
2441 {
2442         return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane,
2443                                            state->hw.rotation,
2444                                            state->color_plane[color_plane].stride,
2445                                            old_offset, new_offset);
2446 }
2447
2448 /*
2449  * Computes the aligned offset to the base tile and adjusts
2450  * x, y. bytes per pixel is assumed to be a power-of-two.
2451  *
2452  * In the 90/270 rotated case, x and y are assumed
2453  * to be already rotated to match the rotated GTT view, and
2454  * pitch is the tile_height aligned framebuffer height.
2455  *
2456  * This function is used when computing the derived information
2457  * under intel_framebuffer, so using any of that information
2458  * here is not allowed. Anything under drm_framebuffer can be
2459  * used. This is why the user has to pass in the pitch since it
2460  * is specified in the rotated orientation.
2461  */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
					int *x, int *y,
					const struct drm_framebuffer *fb,
					int color_plane,
					unsigned int pitch,
					unsigned int rotation,
					u32 alignment)
{
	unsigned int cpp = fb->format->cpp[color_plane];
	u32 offset, offset_aligned;

	if (!is_surface_linear(fb, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* Rotated pitch is specified in tile_height units. */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* Split x/y into whole tiles plus intra-tile remainders. */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		/* Byte offset of the tile containing the original x/y. */
		offset = (tile_rows * pitch_tiles + tiles) * tile_size;

		offset_aligned = offset;
		if (alignment)
			offset_aligned = rounddown(offset_aligned, alignment);

		/* Fold the offset/offset_aligned delta back into x/y. */
		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset;
		if (alignment) {
			/* Re-split the below-alignment remainder into x/y. */
			offset_aligned = rounddown(offset_aligned, alignment);
			*y = (offset % alignment) / pitch;
			*x = ((offset % alignment) - *y * pitch) / cpp;
		} else {
			/* No alignment: everything stays in the offset. */
			*y = *x = 0;
		}
	}

	return offset_aligned;
}
2516
2517 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2518                                               const struct intel_plane_state *state,
2519                                               int color_plane)
2520 {
2521         struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane);
2522         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2523         const struct drm_framebuffer *fb = state->hw.fb;
2524         unsigned int rotation = state->hw.rotation;
2525         int pitch = state->color_plane[color_plane].stride;
2526         u32 alignment;
2527
2528         if (intel_plane->id == PLANE_CURSOR)
2529                 alignment = intel_cursor_alignment(dev_priv);
2530         else
2531                 alignment = intel_surf_alignment(fb, color_plane);
2532
2533         return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2534                                             pitch, rotation, alignment);
2535 }
2536
/*
 * Convert fb->offsets[color_plane] into *@x/*@y pixel offsets.
 * Returns 0 on success, -EINVAL on a misaligned offset, -ERANGE when
 * offset + height * pitch would overflow u32.
 */
static int intel_fb_offset_to_xy(int *x, int *y,
				 const struct drm_framebuffer *fb,
				 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int height;
	u32 alignment;

	/*
	 * Required alignment of the plane offset: a full tile row for
	 * gen12+ semiplanar UV planes, a tile for other tiled surfaces,
	 * none for linear.
	 */
	if (INTEL_GEN(dev_priv) >= 12 &&
	    is_semiplanar_uv_plane(fb, color_plane))
		alignment = intel_tile_row_size(fb, color_plane);
	else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
		alignment = intel_tile_size(dev_priv);
	else
		alignment = 0;

	if (alignment != 0 && fb->offsets[color_plane] % alignment) {
		drm_dbg_kms(&dev_priv->drm,
			    "Misaligned offset 0x%08x for color plane %d\n",
			    fb->offsets[color_plane], color_plane);
		return -EINVAL;
	}

	height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
	height = ALIGN(height, intel_tile_height(fb, color_plane));

	/* Catch potential overflows early */
	if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
			    fb->offsets[color_plane])) {
		drm_dbg_kms(&dev_priv->drm,
			    "Bad offset 0x%08x or pitch %d for color plane %d\n",
			    fb->offsets[color_plane], fb->pitches[color_plane],
			    color_plane);
		return -ERANGE;
	}

	*x = 0;
	*y = 0;

	/* Fold the whole byte offset into the x/y pixel offsets. */
	intel_adjust_aligned_offset(x, y,
				    fb, color_plane, DRM_MODE_ROTATE_0,
				    fb->pitches[color_plane],
				    fb->offsets[color_plane], 0);

	return 0;
}
2584
2585 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2586 {
2587         switch (fb_modifier) {
2588         case I915_FORMAT_MOD_X_TILED:
2589                 return I915_TILING_X;
2590         case I915_FORMAT_MOD_Y_TILED:
2591         case I915_FORMAT_MOD_Y_TILED_CCS:
2592         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2593         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
2594                 return I915_TILING_Y;
2595         default:
2596                 return I915_TILING_NONE;
2597         }
2598 }
2599
/*
 * From the Sky Lake PRM:
 * "The Color Control Surface (CCS) contains the compression status of
 *  the cache-line pairs. The compression state of the cache-line pair
 *  is specified by 2 bits in the CCS. Each CCS cache-line represents
 *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
 *  cache-line-pairs. CCS is always Y tiled."
 *
 * Since cache line pairs refers to horizontally adjacent cache lines,
 * each cache line in the CCS corresponds to an area of 32x16 cache
 * lines on the main surface. Since each pixel is 4 bytes, this gives
 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 * main surface.
 *
 * The .hsub/.vsub values below encode exactly that 8x16 ratio of the
 * CCS plane (plane 1, 1 byte per element) relative to the main
 * surface (plane 0, 4 bytes per pixel).
 */
static const struct drm_format_info skl_ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
2624
/*
 * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
 * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
 * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
 * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
 * the main surface.
 *
 * The .char_per_block/.block_w/.block_h fields below express that
 * bytes-to-pixels ratio per plane. For the planar YUV formats, planes
 * 2 and 3 appear to be the CCS planes of the Y and UV surfaces
 * respectively (consistent with ccs_to_main_plane() usage) — confirm
 * against Bspec when modifying.
 */
static const struct drm_format_info gen12_ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P010, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P012, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P016, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
};
2670
2671 static const struct drm_format_info *
2672 lookup_format_info(const struct drm_format_info formats[],
2673                    int num_formats, u32 format)
2674 {
2675         int i;
2676
2677         for (i = 0; i < num_formats; i++) {
2678                 if (formats[i].format == format)
2679                         return &formats[i];
2680         }
2681
2682         return NULL;
2683 }
2684
2685 static const struct drm_format_info *
2686 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2687 {
2688         switch (cmd->modifier[0]) {
2689         case I915_FORMAT_MOD_Y_TILED_CCS:
2690         case I915_FORMAT_MOD_Yf_TILED_CCS:
2691                 return lookup_format_info(skl_ccs_formats,
2692                                           ARRAY_SIZE(skl_ccs_formats),
2693                                           cmd->pixel_format);
2694         case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
2695         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
2696                 return lookup_format_info(gen12_ccs_formats,
2697                                           ARRAY_SIZE(gen12_ccs_formats),
2698                                           cmd->pixel_format);
2699         default:
2700                 return NULL;
2701         }
2702 }
2703
2704 bool is_ccs_modifier(u64 modifier)
2705 {
2706         return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
2707                modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
2708                modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2709                modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2710 }
2711
2712 static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
2713 {
2714         return DIV_ROUND_UP(fb->pitches[ccs_to_main_plane(fb, ccs_plane)],
2715                             512) * 64;
2716 }
2717
2718 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2719                               u32 pixel_format, u64 modifier)
2720 {
2721         struct intel_crtc *crtc;
2722         struct intel_plane *plane;
2723
2724         /*
2725          * We assume the primary plane for pipe A has
2726          * the highest stride limits of them all,
2727          * if in case pipe A is disabled, use the first pipe from pipe_mask.
2728          */
2729         crtc = intel_get_first_crtc(dev_priv);
2730         if (!crtc)
2731                 return 0;
2732
2733         plane = to_intel_plane(crtc->base.primary);
2734
2735         return plane->max_stride(plane, pixel_format, modifier,
2736                                  DRM_MODE_ROTATE_0);
2737 }
2738
2739 static
2740 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2741                         u32 pixel_format, u64 modifier)
2742 {
2743         /*
2744          * Arbitrary limit for gen4+ chosen to match the
2745          * render engine max stride.
2746          *
2747          * The new CCS hash mode makes remapping impossible
2748          */
2749         if (!is_ccs_modifier(modifier)) {
2750                 if (INTEL_GEN(dev_priv) >= 7)
2751                         return 256*1024;
2752                 else if (INTEL_GEN(dev_priv) >= 4)
2753                         return 128*1024;
2754         }
2755
2756         return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2757 }
2758
/*
 * Minimum stride alignment (in bytes) for @color_plane of @fb.
 * Linear surfaces only need 64 byte alignment, unless their stride
 * exceeds the plane limit, in which case remapping requires a page
 * aligned stride. Tiled surfaces align to the tile width, further
 * padded for the CCS workarounds below.
 */
static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	u32 tile_width;

	if (is_surface_linear(fb, color_plane)) {
		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
							   fb->format->format,
							   fb->modifier);

		/*
		 * To make remapping with linear generally feasible
		 * we need the stride to be page aligned.
		 */
		if (fb->pitches[color_plane] > max_stride &&
		    !is_ccs_modifier(fb->modifier))
			return intel_tile_size(dev_priv);
		else
			return 64;
	}

	tile_width = intel_tile_width_bytes(fb, color_plane);
	if (is_ccs_modifier(fb->modifier)) {
		/*
		 * Display WA #0531: skl,bxt,kbl,glk
		 *
		 * Render decompression and plane width > 3840
		 * combined with horizontal panning requires the
		 * plane stride to be a multiple of 4. We'll just
		 * require the entire fb to accommodate that to avoid
		 * potential runtime errors at plane configuration time.
		 */
		if (IS_GEN(dev_priv, 9) && color_plane == 0 && fb->width > 3840)
			tile_width *= 4;
		/*
		 * The main surface pitch must be padded to a multiple of four
		 * tile widths.
		 */
		else if (INTEL_GEN(dev_priv) >= 12)
			tile_width *= 4;
	}
	return tile_width;
}
2803
2804 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2805 {
2806         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2807         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2808         const struct drm_framebuffer *fb = plane_state->hw.fb;
2809         int i;
2810
2811         /* We don't want to deal with remapping with cursors */
2812         if (plane->id == PLANE_CURSOR)
2813                 return false;
2814
2815         /*
2816          * The display engine limits already match/exceed the
2817          * render engine limits, so not much point in remapping.
2818          * Would also need to deal with the fence POT alignment
2819          * and gen2 2KiB GTT tile size.
2820          */
2821         if (INTEL_GEN(dev_priv) < 4)
2822                 return false;
2823
2824         /*
2825          * The new CCS hash mode isn't compatible with remapping as
2826          * the virtual address of the pages affects the compressed data.
2827          */
2828         if (is_ccs_modifier(fb->modifier))
2829                 return false;
2830
2831         /* Linear needs a page aligned stride for remapping */
2832         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2833                 unsigned int alignment = intel_tile_size(dev_priv) - 1;
2834
2835                 for (i = 0; i < fb->format->num_planes; i++) {
2836                         if (fb->pitches[i] & alignment)
2837                                 return false;
2838                 }
2839         }
2840
2841         return true;
2842 }
2843
2844 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2845 {
2846         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
2847         const struct drm_framebuffer *fb = plane_state->hw.fb;
2848         unsigned int rotation = plane_state->hw.rotation;
2849         u32 stride, max_stride;
2850
2851         /*
2852          * No remapping for invisible planes since we don't have
2853          * an actual source viewport to remap.
2854          */
2855         if (!plane_state->uapi.visible)
2856                 return false;
2857
2858         if (!intel_plane_can_remap(plane_state))
2859                 return false;
2860
2861         /*
2862          * FIXME: aux plane limits on gen9+ are
2863          * unclear in Bspec, for now no checking.
2864          */
2865         stride = intel_fb_pitch(fb, 0, rotation);
2866         max_stride = plane->max_stride(plane, fb->format->format,
2867                                        fb->modifier, rotation);
2868
2869         return stride > max_stride;
2870 }
2871
/*
 * Return in *@hsub/*@vsub the horizontal/vertical subsampling factors
 * of @color_plane relative to the main surface of @fb. Plane 0 is by
 * definition unsubsampled.
 */
static void
intel_fb_plane_get_subsampling(int *hsub, int *vsub,
			       const struct drm_framebuffer *fb,
			       int color_plane)
{
	int main_plane;

	if (color_plane == 0) {
		*hsub = 1;
		*vsub = 1;

		return;
	}

	/*
	 * TODO: Deduct the subsampling from the char block for all CCS
	 * formats and planes.
	 */
	if (!is_gen12_ccs_plane(fb, color_plane)) {
		/* Non-CCS aux planes use the format's own subsampling. */
		*hsub = fb->format->hsub;
		*vsub = fb->format->vsub;

		return;
	}

	/* Gen12 CCS: derive hsub from the per-plane block widths. */
	main_plane = ccs_to_main_plane(fb, color_plane);
	*hsub = drm_format_info_block_width(fb->format, color_plane) /
		drm_format_info_block_width(fb->format, main_plane);

	/*
	 * The min stride check in the core framebuffer_check() function
	 * assumes that format->hsub applies to every plane except for the
	 * first plane. That's incorrect for the CCS AUX plane of the first
	 * plane, but for the above check to pass we must define the block
	 * width with that subsampling applied to it. Adjust the width here
	 * accordingly, so we can calculate the actual subsampling factor.
	 */
	if (main_plane == 0)
		*hsub *= fb->format->hsub;

	/*
	 * NOTE(review): fixed vertical ratio — presumably one gen12 CCS
	 * line covers 32 main surface lines; confirm against the gen12
	 * CCS layout in Bspec.
	 */
	*vsub = 32;
}
2914 static int
2915 intel_fb_check_ccs_xy(struct drm_framebuffer *fb, int ccs_plane, int x, int y)
2916 {
2917         struct drm_i915_private *i915 = to_i915(fb->dev);
2918         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2919         int main_plane;
2920         int hsub, vsub;
2921         int tile_width, tile_height;
2922         int ccs_x, ccs_y;
2923         int main_x, main_y;
2924
2925         if (!is_ccs_plane(fb, ccs_plane))
2926                 return 0;
2927
2928         intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
2929         intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
2930
2931         tile_width *= hsub;
2932         tile_height *= vsub;
2933
2934         ccs_x = (x * hsub) % tile_width;
2935         ccs_y = (y * vsub) % tile_height;
2936
2937         main_plane = ccs_to_main_plane(fb, ccs_plane);
2938         main_x = intel_fb->normal[main_plane].x % tile_width;
2939         main_y = intel_fb->normal[main_plane].y % tile_height;
2940
2941         /*
2942          * CCS doesn't have its own x/y offset register, so the intra CCS tile
2943          * x/y offsets must match between CCS and the main surface.
2944          */
2945         if (main_x != ccs_x || main_y != ccs_y) {
2946                 drm_dbg_kms(&i915->drm,
2947                               "Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
2948                               main_x, main_y,
2949                               ccs_x, ccs_y,
2950                               intel_fb->normal[main_plane].x,
2951                               intel_fb->normal[main_plane].y,
2952                               x, y);
2953                 return -EINVAL;
2954         }
2955
2956         return 0;
2957 }
2958
2959 static void
2960 intel_fb_plane_dims(int *w, int *h, struct drm_framebuffer *fb, int color_plane)
2961 {
2962         int main_plane = is_ccs_plane(fb, color_plane) ?
2963                          ccs_to_main_plane(fb, color_plane) : 0;
2964         int main_hsub, main_vsub;
2965         int hsub, vsub;
2966
2967         intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, main_plane);
2968         intel_fb_plane_get_subsampling(&hsub, &vsub, fb, color_plane);
2969         *w = fb->width / main_hsub / hsub;
2970         *h = fb->height / main_vsub / vsub;
2971 }
2972
2973 /*
2974  * Setup the rotated view for an FB plane and return the size the GTT mapping
2975  * requires for this view.
2976  */
static u32
setup_fb_rotation(int plane, const struct intel_remapped_plane_info *plane_info,
		  u32 gtt_offset_rotated, int x, int y,
		  unsigned int width, unsigned int height,
		  unsigned int tile_size,
		  unsigned int tile_width, unsigned int tile_height,
		  struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	unsigned int pitch_tiles;
	struct drm_rect r;

	/* Y or Yf modifiers required for 90/270 rotation */
	if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
	    fb->modifier != I915_FORMAT_MOD_Yf_TILED)
		return 0;

	if (drm_WARN_ON(fb->dev, plane >= ARRAY_SIZE(rot_info->plane)))
		return 0;

	rot_info->plane[plane] = *plane_info;

	/* Rotated pitch, in tile_height units (see intel_fb_pitch()). */
	intel_fb->rotated[plane].pitch = plane_info->height * tile_height;

	/* rotate the x/y offsets to match the GTT view */
	drm_rect_init(&r, x, y, width, height);
	drm_rect_rotate(&r,
			plane_info->width * tile_width,
			plane_info->height * tile_height,
			DRM_MODE_ROTATE_270);
	x = r.x1;
	y = r.y1;

	/* rotate the tile dimensions to match the GTT view */
	pitch_tiles = intel_fb->rotated[plane].pitch / tile_height;
	swap(tile_width, tile_height);

	/*
	 * We only keep the x/y offsets, so push all of the
	 * gtt offset into the x/y offsets.
	 */
	intel_adjust_tile_offset(&x, &y,
				 tile_width, tile_height,
				 tile_size, pitch_tiles,
				 gtt_offset_rotated * tile_size, 0);

	/*
	 * First pixel of the framebuffer from
	 * the start of the rotated gtt mapping.
	 */
	intel_fb->rotated[plane].x = x;
	intel_fb->rotated[plane].y = y;

	/* Size of the rotated GTT mapping for this plane, in tiles. */
	return plane_info->width * plane_info->height;
}
3033
/*
 * Compute and store the "normal" (unrotated) x/y coordinates of each
 * color plane of @fb, fill in the rotated-view geometry via
 * setup_fb_rotation(), and verify the whole layout fits inside the
 * backing GEM object.
 *
 * Returns 0 on success or a negative errno on a malformed framebuffer.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
                   struct drm_framebuffer *fb)
{
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        u32 gtt_offset_rotated = 0;
        unsigned int max_size = 0;
        int i, num_planes = fb->format->num_planes;
        unsigned int tile_size = intel_tile_size(dev_priv);

        for (i = 0; i < num_planes; i++) {
                unsigned int width, height;
                unsigned int cpp, size;
                u32 offset;
                int x, y;
                int ret;

                cpp = fb->format->cpp[i];
                intel_fb_plane_dims(&width, &height, fb, i);

                /* Convert the plane's byte offset into x/y coordinates. */
                ret = intel_fb_offset_to_xy(&x, &y, fb, i);
                if (ret) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "bad fb plane %d offset: 0x%x\n",
                                    i, fb->offsets[i]);
                        return ret;
                }

                ret = intel_fb_check_ccs_xy(fb, i, x, y);
                if (ret)
                        return ret;

                /*
                 * The fence (if used) is aligned to the start of the object
                 * so having the framebuffer wrap around across the edge of the
                 * fenced region doesn't really work. We have no API to configure
                 * the fence start offset within the object (nor could we probably
                 * on gen2/3). So it's just easier if we just require that the
                 * fb layout agrees with the fence layout. We already check that the
                 * fb stride matches the fence stride elsewhere.
                 */
                if (i == 0 && i915_gem_object_is_tiled(obj) &&
                    (x + width) * cpp > fb->pitches[i]) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "bad fb plane %d offset: 0x%x\n",
                                     i, fb->offsets[i]);
                        return -EINVAL;
                }

                /*
                 * First pixel of the framebuffer from
                 * the start of the normal gtt mapping.
                 */
                intel_fb->normal[i].x = x;
                intel_fb->normal[i].y = y;

                /* Tile-aligned start of the plane, converted to tile units. */
                offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
                                                      fb->pitches[i],
                                                      DRM_MODE_ROTATE_0,
                                                      tile_size);
                offset /= tile_size;

                if (!is_surface_linear(fb, i)) {
                        struct intel_remapped_plane_info plane_info;
                        unsigned int tile_width, tile_height;

                        intel_tile_dims(fb, i, &tile_width, &tile_height);

                        plane_info.offset = offset;
                        plane_info.stride = DIV_ROUND_UP(fb->pitches[i],
                                                         tile_width * cpp);
                        plane_info.width = DIV_ROUND_UP(x + width, tile_width);
                        plane_info.height = DIV_ROUND_UP(y + height,
                                                         tile_height);

                        /* how many tiles does this plane need */
                        size = plane_info.stride * plane_info.height;
                        /*
                         * If the plane isn't horizontally tile aligned,
                         * we need one more tile.
                         */
                        if (x != 0)
                                size++;

                        gtt_offset_rotated +=
                                setup_fb_rotation(i, &plane_info,
                                                  gtt_offset_rotated,
                                                  x, y, width, height,
                                                  tile_size,
                                                  tile_width, tile_height,
                                                  fb);
                } else {
                        /* Linear surface: tiles covering the plane's bytes. */
                        size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
                                            x * cpp, tile_size);
                }

                /* how many tiles in total needed in the bo */
                max_size = max(max_size, offset + size);
        }

        if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
                drm_dbg_kms(&dev_priv->drm,
                            "fb too big for bo (need %llu bytes, have %zu bytes)\n",
                            mul_u32_u32(max_size, tile_size), obj->base.size);
                return -EINVAL;
        }

        return 0;
}
3144
/*
 * Set up a remapped (or rotated) GGTT view for @plane_state that covers
 * only the visible portion of the fb, and rewrite the uapi src
 * coordinates and per-color-plane stride/x/y to be relative to that
 * view.
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->uapi.plane->dev);
        struct drm_framebuffer *fb = plane_state->hw.fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct intel_rotation_info *info = &plane_state->view.rotated;
        unsigned int rotation = plane_state->hw.rotation;
        int i, num_planes = fb->format->num_planes;
        unsigned int tile_size = intel_tile_size(dev_priv);
        unsigned int src_x, src_y;
        unsigned int src_w, src_h;
        u32 gtt_offset = 0;

        /* NOTE: this also zeroes *info; it is refilled in the loop below. */
        memset(&plane_state->view, 0, sizeof(plane_state->view));
        plane_state->view.type = drm_rotation_90_or_270(rotation) ?
                I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

        /* src coordinates are 16.16 fixed point */
        src_x = plane_state->uapi.src.x1 >> 16;
        src_y = plane_state->uapi.src.y1 >> 16;
        src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
        src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

        /* Remapping is not expected to be used with CCS modifiers. */
        drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier));

        /* Make src coordinates relative to the viewport */
        drm_rect_translate(&plane_state->uapi.src,
                           -(src_x << 16), -(src_y << 16));

        /* Rotate src coordinates to match rotated GTT view */
        if (drm_rotation_90_or_270(rotation))
                drm_rect_rotate(&plane_state->uapi.src,
                                src_w << 16, src_h << 16,
                                DRM_MODE_ROTATE_270);

        for (i = 0; i < num_planes; i++) {
                /* Chroma planes (i > 0) are subsampled by hsub/vsub. */
                unsigned int hsub = i ? fb->format->hsub : 1;
                unsigned int vsub = i ? fb->format->vsub : 1;
                unsigned int cpp = fb->format->cpp[i];
                unsigned int tile_width, tile_height;
                unsigned int width, height;
                unsigned int pitch_tiles;
                unsigned int x, y;
                u32 offset;

                intel_tile_dims(fb, i, &tile_width, &tile_height);

                x = src_x / hsub;
                y = src_y / vsub;
                width = src_w / hsub;
                height = src_h / vsub;

                /*
                 * First pixel of the src viewport from the
                 * start of the normal gtt mapping.
                 */
                x += intel_fb->normal[i].x;
                y += intel_fb->normal[i].y;

                offset = intel_compute_aligned_offset(dev_priv, &x, &y,
                                                      fb, i, fb->pitches[i],
                                                      DRM_MODE_ROTATE_0, tile_size);
                offset /= tile_size;

                drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane));
                info->plane[i].offset = offset;
                info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
                                                     tile_width * cpp);
                info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
                info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

                if (drm_rotation_90_or_270(rotation)) {
                        struct drm_rect r;

                        /* rotate the x/y offsets to match the GTT view */
                        drm_rect_init(&r, x, y, width, height);
                        drm_rect_rotate(&r,
                                        info->plane[i].width * tile_width,
                                        info->plane[i].height * tile_height,
                                        DRM_MODE_ROTATE_270);
                        x = r.x1;
                        y = r.y1;

                        pitch_tiles = info->plane[i].height;
                        plane_state->color_plane[i].stride = pitch_tiles * tile_height;

                        /* rotate the tile dimensions to match the GTT view */
                        swap(tile_width, tile_height);
                } else {
                        pitch_tiles = info->plane[i].width;
                        plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
                }

                /*
                 * We only keep the x/y offsets, so push all of the
                 * gtt offset into the x/y offsets.
                 */
                intel_adjust_tile_offset(&x, &y,
                                         tile_width, tile_height,
                                         tile_size, pitch_tiles,
                                         gtt_offset * tile_size, 0);

                gtt_offset += info->plane[i].width * info->plane[i].height;

                plane_state->color_plane[i].offset = 0;
                plane_state->color_plane[i].x = x;
                plane_state->color_plane[i].y = y;
        }
}
3255
/*
 * Compute the GGTT view and per-color-plane stride/x/y for
 * @plane_state, using the fb's precomputed normal[]/rotated[] layouts,
 * or a freshly built remapped view when one is needed. Finishes by
 * validating the resulting stride.
 *
 * Returns 0 on success or the negative errno from the stride check.
 */
static int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
        const struct intel_framebuffer *fb =
                to_intel_framebuffer(plane_state->hw.fb);
        unsigned int rotation = plane_state->hw.rotation;
        int i, num_planes;

        if (!fb)
                return 0;

        num_planes = fb->base.format->num_planes;

        if (intel_plane_needs_remap(plane_state)) {
                intel_plane_remap_gtt(plane_state);

                /*
                 * Sometimes even remapping can't overcome
                 * the stride limitations :( Can happen with
                 * big plane sizes and suitably misaligned
                 * offsets.
                 */
                return intel_plane_check_stride(plane_state);
        }

        intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);

        /* Use the layouts precomputed by intel_fill_fb_info(). */
        for (i = 0; i < num_planes; i++) {
                plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
                plane_state->color_plane[i].offset = 0;

                if (drm_rotation_90_or_270(rotation)) {
                        plane_state->color_plane[i].x = fb->rotated[i].x;
                        plane_state->color_plane[i].y = fb->rotated[i].y;
                } else {
                        plane_state->color_plane[i].x = fb->normal[i].x;
                        plane_state->color_plane[i].y = fb->normal[i].y;
                }
        }

        /* Rotate src coordinates to match rotated GTT view */
        if (drm_rotation_90_or_270(rotation))
                drm_rect_rotate(&plane_state->uapi.src,
                                fb->base.width << 16, fb->base.height << 16,
                                DRM_MODE_ROTATE_270);

        return intel_plane_check_stride(plane_state);
}
3304
3305 static int i9xx_format_to_fourcc(int format)
3306 {
3307         switch (format) {
3308         case DISPPLANE_8BPP:
3309                 return DRM_FORMAT_C8;
3310         case DISPPLANE_BGRA555:
3311                 return DRM_FORMAT_ARGB1555;
3312         case DISPPLANE_BGRX555:
3313                 return DRM_FORMAT_XRGB1555;
3314         case DISPPLANE_BGRX565:
3315                 return DRM_FORMAT_RGB565;
3316         default:
3317         case DISPPLANE_BGRX888:
3318                 return DRM_FORMAT_XRGB8888;
3319         case DISPPLANE_RGBX888:
3320                 return DRM_FORMAT_XBGR8888;
3321         case DISPPLANE_BGRA888:
3322                 return DRM_FORMAT_ARGB8888;
3323         case DISPPLANE_RGBA888:
3324                 return DRM_FORMAT_ABGR8888;
3325         case DISPPLANE_BGRX101010:
3326                 return DRM_FORMAT_XRGB2101010;
3327         case DISPPLANE_RGBX101010:
3328                 return DRM_FORMAT_XBGR2101010;
3329         case DISPPLANE_BGRA101010:
3330                 return DRM_FORMAT_ARGB2101010;
3331         case DISPPLANE_RGBA101010:
3332                 return DRM_FORMAT_ABGR2101010;
3333         case DISPPLANE_RGBX161616:
3334                 return DRM_FORMAT_XBGR16161616F;
3335         }
3336 }
3337
3338 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
3339 {
3340         switch (format) {
3341         case PLANE_CTL_FORMAT_RGB_565:
3342                 return DRM_FORMAT_RGB565;
3343         case PLANE_CTL_FORMAT_NV12:
3344                 return DRM_FORMAT_NV12;
3345         case PLANE_CTL_FORMAT_XYUV:
3346                 return DRM_FORMAT_XYUV8888;
3347         case PLANE_CTL_FORMAT_P010:
3348                 return DRM_FORMAT_P010;
3349         case PLANE_CTL_FORMAT_P012:
3350                 return DRM_FORMAT_P012;
3351         case PLANE_CTL_FORMAT_P016:
3352                 return DRM_FORMAT_P016;
3353         case PLANE_CTL_FORMAT_Y210:
3354                 return DRM_FORMAT_Y210;
3355         case PLANE_CTL_FORMAT_Y212:
3356                 return DRM_FORMAT_Y212;
3357         case PLANE_CTL_FORMAT_Y216:
3358                 return DRM_FORMAT_Y216;
3359         case PLANE_CTL_FORMAT_Y410:
3360                 return DRM_FORMAT_XVYU2101010;
3361         case PLANE_CTL_FORMAT_Y412:
3362                 return DRM_FORMAT_XVYU12_16161616;
3363         case PLANE_CTL_FORMAT_Y416:
3364                 return DRM_FORMAT_XVYU16161616;
3365         default:
3366         case PLANE_CTL_FORMAT_XRGB_8888:
3367                 if (rgb_order) {
3368                         if (alpha)
3369                                 return DRM_FORMAT_ABGR8888;
3370                         else
3371                                 return DRM_FORMAT_XBGR8888;
3372                 } else {
3373                         if (alpha)
3374                                 return DRM_FORMAT_ARGB8888;
3375                         else
3376                                 return DRM_FORMAT_XRGB8888;
3377                 }
3378         case PLANE_CTL_FORMAT_XRGB_2101010:
3379                 if (rgb_order) {
3380                         if (alpha)
3381                                 return DRM_FORMAT_ABGR2101010;
3382                         else
3383                                 return DRM_FORMAT_XBGR2101010;
3384                 } else {
3385                         if (alpha)
3386                                 return DRM_FORMAT_ARGB2101010;
3387                         else
3388                                 return DRM_FORMAT_XRGB2101010;
3389                 }
3390         case PLANE_CTL_FORMAT_XRGB_16161616F:
3391                 if (rgb_order) {
3392                         if (alpha)
3393                                 return DRM_FORMAT_ABGR16161616F;
3394                         else
3395                                 return DRM_FORMAT_XBGR16161616F;
3396                 } else {
3397                         if (alpha)
3398                                 return DRM_FORMAT_ARGB16161616F;
3399                         else
3400                                 return DRM_FORMAT_XRGB16161616F;
3401                 }
3402         }
3403 }
3404
/*
 * Wrap the firmware-programmed scanout buffer (preallocated in stolen
 * memory at plane_config->base/size) in a GEM object and pin it at the
 * same GGTT address (PIN_OFFSET_FIXED | base) so scanout continues
 * uninterrupted during takeover.
 *
 * Returns the pinned vma, or NULL (never an ERR_PTR) on any failure;
 * the caller then falls back to other strategies.
 */
static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
                  struct intel_initial_plane_config *plane_config)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        u32 base, size;

        if (plane_config->size == 0)
                return NULL;

        /* Expand [base, base + size) to GTT page alignment. */
        base = round_down(plane_config->base,
                          I915_GTT_MIN_ALIGNMENT);
        size = round_up(plane_config->base + plane_config->size,
                        I915_GTT_MIN_ALIGNMENT);
        size -= base;

        /*
         * If the FB is too big, just don't use it since fbdev is not very
         * important and we should probably use that space with FBC or other
         * features.
         */
        if (size * 2 > i915->stolen_usable_size)
                return NULL;

        obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
        if (IS_ERR(obj))
                return NULL;

        switch (plane_config->tiling) {
        case I915_TILING_NONE:
                break;
        case I915_TILING_X:
        case I915_TILING_Y:
                obj->tiling_and_stride =
                        plane_config->fb->base.pitches[0] |
                        plane_config->tiling;
                break;
        default:
                MISSING_CASE(plane_config->tiling);
                goto err_obj;
        }

        vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
        if (IS_ERR(vma))
                goto err_obj;

        if (i915_ggtt_pin(vma, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
                goto err_obj;

        /* A tiled scanout needs a mappable, fenceable vma. */
        if (i915_gem_object_is_tiled(obj) &&
            !i915_vma_is_map_and_fenceable(vma))
                goto err_obj;

        return vma;

err_obj:
        i915_gem_object_put(obj);
        return NULL;
}
3465
/*
 * Reconstruct the BIOS framebuffer for @crtc: build a vma over the
 * preallocated stolen memory and initialize the intel_framebuffer on
 * top of it. Only linear, X and Y tiling can be taken over.
 *
 * On success plane_config->vma holds the pinned vma and true is
 * returned; false means the caller must find another fb or disable
 * the plane.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_mode_fb_cmd2 mode_cmd = { 0 };
        struct drm_framebuffer *fb = &plane_config->fb->base;
        struct i915_vma *vma;

        switch (fb->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
        case I915_FORMAT_MOD_X_TILED:
        case I915_FORMAT_MOD_Y_TILED:
                break;
        default:
                drm_dbg(&dev_priv->drm,
                        "Unsupported modifier for initial FB: 0x%llx\n",
                        fb->modifier);
                return false;
        }

        vma = initial_plane_vma(dev_priv, plane_config);
        if (!vma)
                return false;

        /* Re-describe the BIOS fb as a mode_fb_cmd2 for framebuffer_init. */
        mode_cmd.pixel_format = fb->format->format;
        mode_cmd.width = fb->width;
        mode_cmd.height = fb->height;
        mode_cmd.pitches[0] = fb->pitches[0];
        mode_cmd.modifier[0] = fb->modifier;
        mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

        if (intel_framebuffer_init(to_intel_framebuffer(fb),
                                   vma->obj, &mode_cmd)) {
                drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
                goto err_vma;
        }

        plane_config->vma = vma;
        return true;

err_vma:
        i915_vma_put(vma);
        return false;
}
3512
3513 static void
3514 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3515                         struct intel_plane_state *plane_state,
3516                         bool visible)
3517 {
3518         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
3519
3520         plane_state->uapi.visible = visible;
3521
3522         if (visible)
3523                 crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
3524         else
3525                 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
3526 }
3527
3528 static void fixup_active_planes(struct intel_crtc_state *crtc_state)
3529 {
3530         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3531         struct drm_plane *plane;
3532
3533         /*
3534          * Active_planes aliases if multiple "primary" or cursor planes
3535          * have been used on the same (or wrong) pipe. plane_mask uses
3536          * unique ids, hence we can use that to reconstruct active_planes.
3537          */
3538         crtc_state->active_planes = 0;
3539
3540         drm_for_each_plane_mask(plane, &dev_priv->drm,
3541                                 crtc_state->uapi.plane_mask)
3542                 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
3543 }
3544
/*
 * Disable @plane on @crtc outside of a regular atomic commit, fixing
 * up the current crtc/plane software state to match (visibility,
 * active_planes, data rate, cdclk). Used e.g. when the BIOS fb cannot
 * be reconstructed (see intel_find_initial_plane_obj()).
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
                                         struct intel_plane *plane)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(crtc->base.state);
        struct intel_plane_state *plane_state =
                to_intel_plane_state(plane->base.state);

        drm_dbg_kms(&dev_priv->drm,
                    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
                    plane->base.base.id, plane->base.name,
                    crtc->base.base.id, crtc->base.name);

        intel_set_plane_visible(crtc_state, plane_state, false);
        fixup_active_planes(crtc_state);
        crtc_state->data_rate[plane->id] = 0;
        crtc_state->min_cdclk[plane->id] = 0;

        if (plane->id == PLANE_PRIMARY)
                hsw_disable_ips(crtc_state);

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH(dev_priv) &&
            intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, crtc->pipe);

        /*
         * Gen2 reports pipe underruns whenever all planes are disabled.
         * So disable underrun reporting before all the planes get disabled.
         */
        if (IS_GEN(dev_priv, 2) && !crtc_state->active_planes)
                intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

        intel_disable_plane(plane, crtc_state);
}
3589
3590 static struct intel_frontbuffer *
3591 to_intel_frontbuffer(struct drm_framebuffer *fb)
3592 {
3593         return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
3594 }
3595
/*
 * Hook the framebuffer inherited from the BIOS up to the primary plane
 * of @intel_crtc: either by reconstructing a GEM object over the
 * preallocated stolen memory, or by sharing an fb already adopted by
 * another active CRTC scanning out from the same GGTT base. If neither
 * works, the plane is disabled instead.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
                             struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
        struct drm_plane *primary = intel_crtc->base.primary;
        struct drm_plane_state *plane_state = primary->state;
        struct intel_plane *intel_plane = to_intel_plane(primary);
        struct intel_plane_state *intel_state =
                to_intel_plane_state(plane_state);
        struct drm_framebuffer *fb;
        struct i915_vma *vma;

        if (!plane_config->fb)
                return;

        if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
                fb = &plane_config->fb->base;
                vma = plane_config->vma;
                goto valid_fb;
        }

        /*
         * Failed to alloc the obj, check to see if we should share
         * an fb with another CRTC instead
         */
        for_each_crtc(dev, c) {
                struct intel_plane_state *state;

                if (c == &intel_crtc->base)
                        continue;

                if (!to_intel_crtc(c)->active)
                        continue;

                state = to_intel_plane_state(c->primary->state);
                if (!state->vma)
                        continue;

                if (intel_plane_ggtt_offset(state) == plane_config->base) {
                        fb = state->hw.fb;
                        vma = state->vma;
                        goto valid_fb;
                }
        }

        /*
         * We've failed to reconstruct the BIOS FB.  Current display state
         * indicates that the primary plane is visible, but has a NULL FB,
         * which will lead to problems later if we don't fix it up.  The
         * simplest solution is to just disable the primary plane now and
         * pretend the BIOS never had it enabled.
         */
        intel_plane_disable_noatomic(intel_crtc, intel_plane);

        return;

valid_fb:
        intel_state->hw.rotation = plane_config->rotation;
        intel_fill_fb_ggtt_view(&intel_state->view, fb,
                                intel_state->hw.rotation);
        intel_state->color_plane[0].stride =
                intel_fb_pitch(fb, 0, intel_state->hw.rotation);

        /* Take an extra pin + reference for this plane's use of the vma. */
        __i915_vma_pin(vma);
        intel_state->vma = i915_vma_get(vma);
        if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
                if (vma->fence)
                        intel_state->flags |= PLANE_HAS_FENCE;

        /* Full-fb src/crtc rectangles (src is 16.16 fixed point). */
        plane_state->src_x = 0;
        plane_state->src_y = 0;
        plane_state->src_w = fb->width << 16;
        plane_state->src_h = fb->height << 16;

        plane_state->crtc_x = 0;
        plane_state->crtc_y = 0;
        plane_state->crtc_w = fb->width;
        plane_state->crtc_h = fb->height;

        intel_state->uapi.src = drm_plane_state_src(plane_state);
        intel_state->uapi.dst = drm_plane_state_dest(plane_state);

        if (plane_config->tiling)
                dev_priv->preserve_bios_swizzle = true;

        plane_state->fb = fb;
        drm_framebuffer_get(fb);

        plane_state->crtc = &intel_crtc->base;
        intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);

        intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

        atomic_or(to_intel_plane(primary)->frontbuffer_bit,
                  &to_intel_frontbuffer(fb)->bits);
}
3695
3696 static int skl_max_plane_width(const struct drm_framebuffer *fb,
3697                                int color_plane,
3698                                unsigned int rotation)
3699 {
3700         int cpp = fb->format->cpp[color_plane];
3701
3702         switch (fb->modifier) {
3703         case DRM_FORMAT_MOD_LINEAR:
3704         case I915_FORMAT_MOD_X_TILED:
3705                 /*
3706                  * Validated limit is 4k, but has 5k should
3707                  * work apart from the following features:
3708                  * - Ytile (already limited to 4k)
3709                  * - FP16 (already limited to 4k)
3710                  * - render compression (already limited to 4k)
3711                  * - KVMR sprite and cursor (don't care)
3712                  * - horizontal panning (TODO verify this)
3713                  * - pipe and plane scaling (TODO verify this)
3714                  */
3715                 if (cpp == 8)
3716                         return 4096;
3717                 else
3718                         return 5120;
3719         case I915_FORMAT_MOD_Y_TILED_CCS:
3720         case I915_FORMAT_MOD_Yf_TILED_CCS:
3721         case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
3722                 /* FIXME AUX plane? */
3723         case I915_FORMAT_MOD_Y_TILED:
3724         case I915_FORMAT_MOD_Yf_TILED:
3725                 if (cpp == 8)
3726                         return 2048;
3727                 else
3728                         return 4096;
3729         default:
3730                 MISSING_CASE(fb->modifier);
3731                 return 2048;
3732         }
3733 }
3734
3735 static int glk_max_plane_width(const struct drm_framebuffer *fb,
3736                                int color_plane,
3737                                unsigned int rotation)
3738 {
3739         int cpp = fb->format->cpp[color_plane];
3740
3741         switch (fb->modifier) {
3742         case DRM_FORMAT_MOD_LINEAR:
3743         case I915_FORMAT_MOD_X_TILED:
3744                 if (cpp == 8)
3745                         return 4096;
3746                 else
3747                         return 5120;
3748         case I915_FORMAT_MOD_Y_TILED_CCS:
3749         case I915_FORMAT_MOD_Yf_TILED_CCS:
3750                 /* FIXME AUX plane? */
3751         case I915_FORMAT_MOD_Y_TILED:
3752         case I915_FORMAT_MOD_Yf_TILED:
3753                 if (cpp == 8)
3754                         return 2048;
3755                 else
3756                         return 5120;
3757         default:
3758                 MISSING_CASE(fb->modifier);
3759                 return 2048;
3760         }
3761 }
3762
/* Max plane width in pixels for gen11+, regardless of modifier/cpp. */
static int icl_max_plane_width(const struct drm_framebuffer *fb,
                               int color_plane,
                               unsigned int rotation)
{
        return 5120;
}
3769
/* Max plane height in pixels for pre-gen11 skl-style hardware. */
static int skl_max_plane_height(void)
{
        return 4096;
}
3774
/* Max plane height in pixels for gen11+ hardware. */
static int icl_max_plane_height(void)
{
        return 4320;
}
3779
/*
 * Try to line the CCS (AUX) surface up with the already-chosen main
 * surface position (@main_x/@main_y at @main_offset): step the AUX
 * offset backwards one alignment unit at a time until its x/y
 * coordinates match the main surface's.
 *
 * Returns true and updates color_plane[ccs_plane] on success, false
 * if the two surfaces cannot be made to coincide.
 */
static bool
skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
                               int main_x, int main_y, u32 main_offset,
                               int ccs_plane)
{
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        int aux_x = plane_state->color_plane[ccs_plane].x;
        int aux_y = plane_state->color_plane[ccs_plane].y;
        u32 aux_offset = plane_state->color_plane[ccs_plane].offset;
        u32 alignment = intel_surf_alignment(fb, ccs_plane);
        int hsub;
        int vsub;

        intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
        while (aux_offset >= main_offset && aux_y <= main_y) {
                int x, y;

                /* Already lined up with the main surface? */
                if (aux_x == main_x && aux_y == main_y)
                        break;

                /* Cannot step back any further. */
                if (aux_offset == 0)
                        break;

                /* Work in subsampled (AUX) units for the adjustment. */
                x = aux_x / hsub;
                y = aux_y / vsub;
                aux_offset = intel_plane_adjust_aligned_offset(&x, &y,
                                                               plane_state,
                                                               ccs_plane,
                                                               aux_offset,
                                                               aux_offset -
                                                                alignment);
                aux_x = x * hsub + aux_x % hsub;
                aux_y = y * vsub + aux_y % vsub;
        }

        if (aux_x != main_x || aux_y != main_y)
                return false;

        plane_state->color_plane[ccs_plane].offset = aux_offset;
        plane_state->color_plane[ccs_plane].x = aux_x;
        plane_state->color_plane[ccs_plane].y = aux_y;

        return true;
}
3824
/*
 * Compute and validate the final x/y/offset for the main surface
 * (color plane 0) on skl+. The AUX surface (CCS or chroma) state in
 * plane_state->color_plane[] must already be set up, since the main
 * surface offset must not exceed the AUX offset and, for CCS, the
 * main and AUX x/y coordinates must agree.
 *
 * On success the results are stored in plane_state->color_plane[0]
 * and uapi.src is translated to the final coordinates.
 * Returns 0 on success, -EINVAL if no suitable offset can be found
 * or the source size exceeds the hw limits.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	/* uapi.src is in 16.16 fixed point; convert to integer pixels */
	int x = plane_state->uapi.src.x1 >> 16;
	int y = plane_state->uapi.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->uapi.src) >> 16;
	int h = drm_rect_height(&plane_state->uapi.src) >> 16;
	int max_width;
	int max_height;
	u32 alignment;
	u32 offset;
	int aux_plane = intel_main_to_aux_plane(fb, 0);
	u32 aux_offset = plane_state->color_plane[aux_plane].offset;

	/* Max plane dimensions vary by platform generation */
	if (INTEL_GEN(dev_priv) >= 11)
		max_width = icl_max_plane_width(fb, 0, rotation);
	else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		max_width = glk_max_plane_width(fb, 0, rotation);
	else
		max_width = skl_max_plane_width(fb, 0, rotation);

	if (INTEL_GEN(dev_priv) >= 11)
		max_height = icl_max_plane_height();
	else
		max_height = skl_max_plane_height();

	if (w > max_width || h > max_height) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			    w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);
	/* The mask math below assumes a power-of-2 (or zero) alignment */
	if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
		return -EINVAL;

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* Walk the offset back one alignment step at a time until x fits */
		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				drm_dbg_kms(&dev_priv->drm,
					    "Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
						       offset, aux_plane)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[aux_plane].x ||
		    y != plane_state->color_plane[aux_plane].y) {
			drm_dbg_kms(&dev_priv->drm,
				    "Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      x << 16, y << 16);

	return 0;
}
3931
/*
 * Compute and validate the x/y/offset for the chroma (UV) surface of a
 * YUV semiplanar (e.g. NV12/P01x) framebuffer. With a CCS modifier the
 * UV plane's coordinates must additionally be made to match its CCS
 * plane (see skl_check_main_ccs_coordinates()).
 *
 * Results are stored in plane_state->color_plane[1].
 * Returns 0 on success, -EINVAL on failure.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	int uv_plane = 1;
	int max_width = skl_max_plane_width(fb, uv_plane, rotation);
	int max_height = 4096;
	/* >> 17 = 16.16 fixed point to integer plus 2x chroma subsampling */
	int x = plane_state->uapi.src.x1 >> 17;
	int y = plane_state->uapi.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->uapi.src) >> 17;
	int h = drm_rect_height(&plane_state->uapi.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, uv_plane);
	offset = intel_plane_compute_aligned_offset(&x, &y,
						    plane_state, uv_plane);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		drm_dbg_kms(&i915->drm,
			    "CbCr source size %dx%d too big (limit %dx%d)\n",
			    w, h, max_width, max_height);
		return -EINVAL;
	}

	if (is_ccs_modifier(fb->modifier)) {
		int ccs_plane = main_to_ccs_plane(fb, uv_plane);
		int aux_offset = plane_state->color_plane[ccs_plane].offset;
		int alignment = intel_surf_alignment(fb, uv_plane);

		/* The UV offset must not exceed its CCS plane's offset */
		if (offset > aux_offset)
			offset = intel_plane_adjust_aligned_offset(&x, &y,
								   plane_state,
								   uv_plane,
								   offset,
								   aux_offset & ~(alignment - 1));

		/* Step the offset back until the UV and CCS x/y agree */
		while (!skl_check_main_ccs_coordinates(plane_state, x, y,
						       offset, ccs_plane)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y,
								   plane_state,
								   uv_plane,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[ccs_plane].x ||
		    y != plane_state->color_plane[ccs_plane].y) {
			drm_dbg_kms(&i915->drm,
				    "Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[uv_plane].offset = offset;
	plane_state->color_plane[uv_plane].x = x;
	plane_state->color_plane[uv_plane].y = y;

	return 0;
}
3995
/*
 * Compute the x/y/offset for every CCS (color control surface) plane of
 * the framebuffer. The CCS coordinates are derived from the source
 * coordinates scaled by the combined subsampling of the CCS plane
 * relative to its main plane.
 *
 * Results are stored in plane_state->color_plane[ccs_plane].
 * Always returns 0.
 */
static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int src_x = plane_state->uapi.src.x1 >> 16;
	int src_y = plane_state->uapi.src.y1 >> 16;
	u32 offset;
	int ccs_plane;

	for (ccs_plane = 0; ccs_plane < fb->format->num_planes; ccs_plane++) {
		int main_hsub, main_vsub;
		int hsub, vsub;
		int x, y;

		/* only the CCS planes themselves are handled here */
		if (!is_ccs_plane(fb, ccs_plane))
			continue;

		intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
					       ccs_to_main_plane(fb, ccs_plane));
		intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);

		/* total subsampling of the CCS plane vs. the fb origin */
		hsub *= main_hsub;
		vsub *= main_vsub;
		x = src_x / hsub;
		y = src_y / vsub;

		intel_add_fb_offsets(&x, &y, plane_state, ccs_plane);

		offset = intel_plane_compute_aligned_offset(&x, &y,
							    plane_state,
							    ccs_plane);

		plane_state->color_plane[ccs_plane].offset = offset;
		/*
		 * Store the x/y in main plane units: undo the CCS
		 * subsampling (keeping the sub-tile remainder) and
		 * divide out the main plane's own subsampling.
		 */
		plane_state->color_plane[ccs_plane].x = (x * hsub +
							 src_x % hsub) /
							main_hsub;
		plane_state->color_plane[ccs_plane].y = (y * vsub +
							 src_y % vsub) /
							main_vsub;
	}

	return 0;
}
4038
4039 int skl_check_plane_surface(struct intel_plane_state *plane_state)
4040 {
4041         const struct drm_framebuffer *fb = plane_state->hw.fb;
4042         int ret;
4043         bool needs_aux = false;
4044
4045         ret = intel_plane_compute_gtt(plane_state);
4046         if (ret)
4047                 return ret;
4048
4049         if (!plane_state->uapi.visible)
4050                 return 0;
4051
4052         /*
4053          * Handle the AUX surface first since the main surface setup depends on
4054          * it.
4055          */
4056         if (is_ccs_modifier(fb->modifier)) {
4057                 needs_aux = true;
4058                 ret = skl_check_ccs_aux_surface(plane_state);
4059                 if (ret)
4060                         return ret;
4061         }
4062
4063         if (intel_format_info_is_yuv_semiplanar(fb->format,
4064                                                 fb->modifier)) {
4065                 needs_aux = true;
4066                 ret = skl_check_nv12_aux_surface(plane_state);
4067                 if (ret)
4068                         return ret;
4069         }
4070
4071         if (!needs_aux) {
4072                 int i;
4073
4074                 for (i = 1; i < fb->format->num_planes; i++) {
4075                         plane_state->color_plane[i].offset = ~0xfff;
4076                         plane_state->color_plane[i].x = 0;
4077                         plane_state->color_plane[i].y = 0;
4078                 }
4079         }
4080
4081         ret = skl_check_main_surface(plane_state);
4082         if (ret)
4083                 return ret;
4084
4085         return 0;
4086 }
4087
4088 static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state,
4089                              const struct intel_plane_state *plane_state,
4090                              unsigned int *num, unsigned int *den)
4091 {
4092         const struct drm_framebuffer *fb = plane_state->hw.fb;
4093         unsigned int cpp = fb->format->cpp[0];
4094
4095         /*
4096          * g4x bspec says 64bpp pixel rate can't exceed 80%
4097          * of cdclk when the sprite plane is enabled on the
4098          * same pipe. ilk/snb bspec says 64bpp pixel rate is
4099          * never allowed to exceed 80% of cdclk. Let's just go
4100          * with the ilk/snb limit always.
4101          */
4102         if (cpp == 8) {
4103                 *num = 10;
4104                 *den = 8;
4105         } else {
4106                 *num = 1;
4107                 *den = 1;
4108         }
4109 }
4110
4111 static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
4112                                 const struct intel_plane_state *plane_state)
4113 {
4114         unsigned int pixel_rate;
4115         unsigned int num, den;
4116
4117         /*
4118          * Note that crtc_state->pixel_rate accounts for both
4119          * horizontal and vertical panel fitter downscaling factors.
4120          * Pre-HSW bspec tells us to only consider the horizontal
4121          * downscaling factor here. We ignore that and just consider
4122          * both for simplicity.
4123          */
4124         pixel_rate = crtc_state->pixel_rate;
4125
4126         i9xx_plane_ratio(crtc_state, plane_state, &num, &den);
4127
4128         /* two pixels per clock with double wide pipe */
4129         if (crtc_state->double_wide)
4130                 den *= 2;
4131
4132         return DIV_ROUND_UP(pixel_rate * num, den);
4133 }
4134
4135 unsigned int
4136 i9xx_plane_max_stride(struct intel_plane *plane,
4137                       u32 pixel_format, u64 modifier,
4138                       unsigned int rotation)
4139 {
4140         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4141
4142         if (!HAS_GMCH(dev_priv)) {
4143                 return 32*1024;
4144         } else if (INTEL_GEN(dev_priv) >= 4) {
4145                 if (modifier == I915_FORMAT_MOD_X_TILED)
4146                         return 16*1024;
4147                 else
4148                         return 32*1024;
4149         } else if (INTEL_GEN(dev_priv) >= 3) {
4150                 if (modifier == I915_FORMAT_MOD_X_TILED)
4151                         return 8*1024;
4152                 else
4153                         return 16*1024;
4154         } else {
4155                 if (plane->i9xx_plane == PLANE_C)
4156                         return 4*1024;
4157                 else
4158                         return 8*1024;
4159         }
4160 }
4161
4162 static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4163 {
4164         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4165         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4166         u32 dspcntr = 0;
4167
4168         if (crtc_state->gamma_enable)
4169                 dspcntr |= DISPPLANE_GAMMA_ENABLE;
4170
4171         if (crtc_state->csc_enable)
4172                 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
4173
4174         if (INTEL_GEN(dev_priv) < 5)
4175                 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
4176
4177         return dspcntr;
4178 }
4179
/*
 * Build the plane-state-dependent portion of the DSPCNTR value for a
 * pre-skl primary plane: enable bit, trickle feed, pixel format,
 * tiling and rotation/reflection bits.
 *
 * Returns 0 (and logs a MISSING_CASE) for an unhandled pixel format.
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	unsigned int rotation = plane_state->hw.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE;

	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	/* map the DRM fourcc to the hw pixel format field */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_ARGB1555:
		dspcntr |= DISPPLANE_BGRA555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRA888;
		break;
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBA888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRA101010;
		break;
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBA101010;
		break;
	case DRM_FORMAT_XBGR16161616F:
		dspcntr |= DISPPLANE_RGBX161616;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}
4252
/*
 * Compute the final x/y/offset for a pre-skl primary plane surface and
 * store it in plane_state->color_plane[0]. Also translates uapi.src to
 * the final coordinates and applies the sw rotation/reflection
 * coordinate adjustment on platforms that don't do it in hardware.
 *
 * Returns 0 on success or a negative error code.
 */
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	int src_x, src_y, src_w;
	u32 offset;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	if (!plane_state->uapi.visible)
		return 0;

	/* uapi.src is 16.16 fixed point; convert to integer pixels */
	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;

	/* Undocumented hardware limit on i965/g4x/vlv/chv */
	if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048)
		return -EINVAL;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);

	/* gen2/3 have no surface offset register; everything is linear */
	if (INTEL_GEN(dev_priv) >= 4)
		offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
							    plane_state, 0);
	else
		offset = 0;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      src_x << 16, src_y << 16);

	/* HSW/BDW do this automagically in hardware */
	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
		unsigned int rotation = plane_state->hw.rotation;
		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

		/* point x/y at the opposite corner for 180°/X-flip scanout */
		if (rotation & DRM_MODE_ROTATE_180) {
			src_x += src_w - 1;
			src_y += src_h - 1;
		} else if (rotation & DRM_MODE_REFLECT_X) {
			src_x += src_w - 1;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
4312
4313 static bool i9xx_plane_has_windowing(struct intel_plane *plane)
4314 {
4315         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
4316         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4317
4318         if (IS_CHERRYVIEW(dev_priv))
4319                 return i9xx_plane == PLANE_B;
4320         else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
4321                 return false;
4322         else if (IS_GEN(dev_priv, 4))
4323                 return i9xx_plane == PLANE_C;
4324         else
4325                 return i9xx_plane == PLANE_B ||
4326                         i9xx_plane == PLANE_C;
4327 }
4328
/*
 * Atomic check for a pre-skl primary plane: validate rotation,
 * clipping (no scaling supported), and surface layout, then compute
 * the DSPCNTR value into plane_state->ctl.
 *
 * Returns 0 on success or a negative error code.
 */
static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
		 struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	int ret;

	ret = chv_plane_check_rotation(plane_state);
	if (ret)
		return ret;

	/* no scaling; windowing only where the hw supports it */
	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
						  &crtc_state->uapi,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  i9xx_plane_has_windowing(plane),
						  true);
	if (ret)
		return ret;

	ret = i9xx_check_plane_surface(plane_state);
	if (ret)
		return ret;

	/* clipped away entirely: nothing more to compute */
	if (!plane_state->uapi.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);

	return 0;
}
4364
/*
 * Program and (re)enable a pre-skl primary plane from precomputed
 * plane/crtc state. All register writes are done under the uncore lock
 * with _fw accessors, and are ordered so the plane enable latches
 * atomically on the surface address write.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	int crtc_x = plane_state->uapi.dst.x1;
	int crtc_y = plane_state->uapi.dst.y1;
	int crtc_w = drm_rect_width(&plane_state->uapi.dst);
	int crtc_h = drm_rect_height(&plane_state->uapi.dst);
	unsigned long irqflags;
	u32 dspaddr_offset;
	u32 dspcntr;

	/* combine the plane-dependent and crtc-dependent control bits */
	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* gen2/3 have no DSPSURF; the address is the linear offset */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
			  plane_state->color_plane[0].stride);

	if (INTEL_GEN(dev_priv) < 4) {
		/*
		 * PLANE_A doesn't actually have a full window
		 * generator but let's assume we still need to
		 * program whatever is there.
		 */
		intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
				  (crtc_y << 16) | crtc_x);
		intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
				  ((crtc_h - 1) << 16) | (crtc_w - 1));
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
				  (crtc_y << 16) | crtc_x);
		intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
				  ((crtc_h - 1) << 16) | (crtc_w - 1));
		intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
				  (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
				  linear_offset);
		intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
				  (y << 16) | x);
	}

	/*
	 * The control register self-arms if the plane was previously
	 * disabled. Try to make the plane enable atomic by writing
	 * the control register just before the surface register.
	 */
	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
				  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
	else
		intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
				  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
4439
/*
 * Disable a pre-skl primary plane, while still programming the
 * crtc-dependent DSPCNTR bits (see comment below for why).
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;
	u32 dspcntr;

	/*
	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
	 * enable on ilk+ affect the pipe bottom color as
	 * well, so we must configure them even if the plane
	 * is disabled.
	 *
	 * On pre-g4x there is no way to gamma correct the
	 * pipe bottom color but we'll keep on doing this
	 * anyway so that the crtc state readout works correctly.
	 */
	dspcntr = i9xx_plane_ctl_crtc(crtc_state);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* surface address write latches the disable, as in the update path */
	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
	else
		intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
4470
/*
 * Read back whether the plane is currently enabled in hardware and
 * which pipe it is attached to (*pipe). Takes a display power
 * reference for the duration of the register read.
 *
 * Returns false if the power well is down (plane state unreadable).
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	/* pre-ilk: the pipe assignment lives in DSPCNTR itself */
	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
4505
/*
 * Disable (unbind) a single pipe scaler by clearing its control,
 * window position and window size registers, under the uncore lock.
 */
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
4520
4521 /*
4522  * This function detaches (aka. unbinds) unused scalers in hardware
4523  */
4524 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
4525 {
4526         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
4527         const struct intel_crtc_scaler_state *scaler_state =
4528                 &crtc_state->scaler_state;
4529         int i;
4530
4531         /* loop through and disable scalers that aren't in use */
4532         for (i = 0; i < intel_crtc->num_scalers; i++) {
4533                 if (!scaler_state->scalers[i].in_use)
4534                         skl_detach_scaler(intel_crtc, i);
4535         }
4536 }
4537
/*
 * Return the divisor that converts a byte stride into the unit the
 * skl+ plane stride register expects for this surface.
 */
static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
					  int color_plane, unsigned int rotation)
{
	/*
	 * Linear surfaces express their stride in 64 byte chunks,
	 * tiled surfaces in number of tiles.
	 */
	if (is_surface_linear(fb, color_plane))
		return 64;

	/* 90/270 rotation walks tiles vertically, so use the tile height */
	return drm_rotation_90_or_270(rotation) ?
		intel_tile_height(fb, color_plane) :
		intel_tile_width_bytes(fb, color_plane);
}
4552
4553 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
4554                      int color_plane)
4555 {
4556         const struct drm_framebuffer *fb = plane_state->hw.fb;
4557         unsigned int rotation = plane_state->hw.rotation;
4558         u32 stride = plane_state->color_plane[color_plane].stride;
4559
4560         if (color_plane >= fb->format->num_planes)
4561                 return 0;
4562
4563         return stride / skl_plane_stride_mult(fb, color_plane, rotation);
4564 }
4565
/*
 * Translate a DRM fourcc pixel format into the PLANE_CTL format field.
 *
 * BGR-ordered variants get PLANE_CTL_ORDER_RGBX on top of the base
 * format, and packed YUV 4:2:2 formats carry an additional component
 * order field. Alpha and non-alpha variants of the same layout share a
 * single encoding here; alpha blending is programmed separately (see
 * skl_plane_ctl_alpha()).
 *
 * Returns 0 and logs a MISSING_CASE for unknown formats.
 */
static u32 skl_plane_ctl_format(u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F;
	case DRM_FORMAT_XYUV8888:
		return PLANE_CTL_FORMAT_XYUV;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	case DRM_FORMAT_P010:
		return PLANE_CTL_FORMAT_P010;
	case DRM_FORMAT_P012:
		return PLANE_CTL_FORMAT_P012;
	case DRM_FORMAT_P016:
		return PLANE_CTL_FORMAT_P016;
	case DRM_FORMAT_Y210:
		return PLANE_CTL_FORMAT_Y210;
	case DRM_FORMAT_Y212:
		return PLANE_CTL_FORMAT_Y212;
	case DRM_FORMAT_Y216:
		return PLANE_CTL_FORMAT_Y216;
	case DRM_FORMAT_XVYU2101010:
		return PLANE_CTL_FORMAT_Y410;
	case DRM_FORMAT_XVYU12_16161616:
		return PLANE_CTL_FORMAT_Y412;
	case DRM_FORMAT_XVYU16161616:
		return PLANE_CTL_FORMAT_Y416;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
4627
4628 static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
4629 {
4630         if (!plane_state->hw.fb->format->has_alpha)
4631                 return PLANE_CTL_ALPHA_DISABLE;
4632
4633         switch (plane_state->hw.pixel_blend_mode) {
4634         case DRM_MODE_BLEND_PIXEL_NONE:
4635                 return PLANE_CTL_ALPHA_DISABLE;
4636         case DRM_MODE_BLEND_PREMULTI:
4637                 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
4638         case DRM_MODE_BLEND_COVERAGE:
4639                 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4640         default:
4641                 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4642                 return PLANE_CTL_ALPHA_DISABLE;
4643         }
4644 }
4645
4646 static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4647 {
4648         if (!plane_state->hw.fb->format->has_alpha)
4649                 return PLANE_COLOR_ALPHA_DISABLE;
4650
4651         switch (plane_state->hw.pixel_blend_mode) {
4652         case DRM_MODE_BLEND_PIXEL_NONE:
4653                 return PLANE_COLOR_ALPHA_DISABLE;
4654         case DRM_MODE_BLEND_PREMULTI:
4655                 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
4656         case DRM_MODE_BLEND_COVERAGE:
4657                 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4658         default:
4659                 MISSING_CASE(plane_state->hw.pixel_blend_mode);
4660                 return PLANE_COLOR_ALPHA_DISABLE;
4661         }
4662 }
4663
/*
 * Translate a framebuffer modifier into the PLANE_CTL tiling (and, for
 * CCS modifiers, decompression) bits. CCS modifiers enable render or
 * media decompression on top of the base Y/Yf tiling; the gen12 RC CCS
 * variant additionally disables clear color.
 *
 * Linear gets 0; unknown modifiers log a MISSING_CASE and also return 0.
 */
static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
		return PLANE_CTL_TILED_Y |
		       PLANE_CTL_RENDER_DECOMPRESSION_ENABLE |
		       PLANE_CTL_CLEAR_COLOR_DISABLE;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}
4691
4692 static u32 skl_plane_ctl_rotate(unsigned int rotate)
4693 {
4694         switch (rotate) {
4695         case DRM_MODE_ROTATE_0:
4696                 break;
4697         /*
4698          * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
4699          * while i915 HW rotation is clockwise, thats why this swapping.
4700          */
4701         case DRM_MODE_ROTATE_90:
4702                 return PLANE_CTL_ROTATE_270;
4703         case DRM_MODE_ROTATE_180:
4704                 return PLANE_CTL_ROTATE_180;
4705         case DRM_MODE_ROTATE_270:
4706                 return PLANE_CTL_ROTATE_90;
4707         default:
4708                 MISSING_CASE(rotate);
4709         }
4710
4711         return 0;
4712 }
4713
4714 static u32 cnl_plane_ctl_flip(unsigned int reflect)
4715 {
4716         switch (reflect) {
4717         case 0:
4718                 break;
4719         case DRM_MODE_REFLECT_X:
4720                 return PLANE_CTL_FLIP_HORIZONTAL;
4721         case DRM_MODE_REFLECT_Y:
4722         default:
4723                 MISSING_CASE(reflect);
4724         }
4725
4726         return 0;
4727 }
4728
4729 u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4730 {
4731         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4732         u32 plane_ctl = 0;
4733
4734         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4735                 return plane_ctl;
4736
4737         if (crtc_state->gamma_enable)
4738                 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4739
4740         if (crtc_state->csc_enable)
4741                 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
4742
4743         return plane_ctl;
4744 }
4745
/*
 * Compute the plane-state-dependent part of PLANE_CTL.
 *
 * On pre-GLK/gen9 hardware the alpha mode, plane gamma disable and the
 * YUV->RGB CSC/range setup also live in PLANE_CTL; on GLK/gen10+ those
 * moved to PLANE_COLOR_CTL (see glk_plane_color_ctl()), so for those
 * platforms only the format/tiling/rotation/flip/colorkey bits are
 * assembled here.
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	unsigned int rotation = plane_state->hw.rotation;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/* pre-GLK: color management bits still live in PLANE_CTL */
	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* horizontal flip only exists on gen10+ */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
4784
4785 u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4786 {
4787         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4788         u32 plane_color_ctl = 0;
4789
4790         if (INTEL_GEN(dev_priv) >= 11)
4791                 return plane_color_ctl;
4792
4793         if (crtc_state->gamma_enable)
4794                 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4795
4796         if (crtc_state->csc_enable)
4797                 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
4798
4799         return plane_color_ctl;
4800 }
4801
/*
 * Compute the plane-state-dependent part of PLANE_COLOR_CTL (GLK+):
 * plane gamma disable, alpha mode, and the YUV->RGB conversion setup.
 *
 * For YUV framebuffers on non-HDR planes the fixed-function CSC mode is
 * picked from the color encoding (601/709/2020); HDR-capable planes
 * instead enable the programmable input CSC.
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	u32 plane_color_ctl = 0;

	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
		switch (plane_state->hw.color_encoding) {
		case DRM_COLOR_YCBCR_BT709:
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
			break;
		case DRM_COLOR_YCBCR_BT2020:
			plane_color_ctl |=
				PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020;
			break;
		default:
			/* BT.601 is the fallback for any other encoding */
			plane_color_ctl |=
				PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601;
		}
		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		/* HDR planes use the programmable input CSC instead */
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
	}

	return plane_color_ctl;
}
4835
/*
 * Re-initialize the display after a reset and, if @state is non-NULL,
 * re-commit that previously duplicated atomic state.
 *
 * Hardware state readout and VGA redisable happen unconditionally.
 * Every crtc in @state gets mode_changed forced so a full recalculation
 * happens even when the restored state looks compatible.
 *
 * Returns 0 on success or a negative error code from the commit;
 * -EDEADLK triggers a WARN since the caller is expected to already hold
 * all modeset locks (see intel_prepare_reset()).
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	intel_vga_redisable(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	drm_WARN_ON(dev, ret == -EDEADLK);
	return ret;
}
4874
4875 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4876 {
4877         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4878                 intel_has_gpu_reset(&dev_priv->gt));
4879 }
4880
/*
 * Called before a GPU reset that will clobber the display (or when the
 * force_reset_modeset_test modparam is set): take all modeset locks,
 * duplicate the current atomic state for later restore, and disable all
 * crtcs.
 *
 * NOTE: the modeset locks and the duplicated state are deliberately left
 * held/stored on return — including the error paths — and are consumed
 * and released by intel_finish_reset().
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!dev_priv->params.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Modeset potentially stuck, unbreaking through wedging\n");
		intel_gt_set_wedged(&dev_priv->gt);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* retry until all modeset locks are acquired without deadlock */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
			ret);
		/* locks stay held; intel_finish_reset() drops them */
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
			ret);
		drm_atomic_state_put(state);
		/* locks stay held; intel_finish_reset() drops them */
		return;
	}

	/* handed over to intel_finish_reset() for restore */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
4940
/*
 * Counterpart of intel_prepare_reset(): restore the display state saved
 * there, then drop the modeset locks taken by intel_prepare_reset() and
 * clear I915_RESET_MODESET.
 *
 * No-op when intel_prepare_reset() didn't run (I915_RESET_MODESET not
 * set). If the reset actually clobbered the display, the display hw is
 * fully re-initialized before the saved state is committed.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev_priv);
		intel_init_clock_gating(dev_priv);

		/* re-arm hotplug interrupts lost in the reset */
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			drm_err(&dev_priv->drm,
				"Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
4993
/*
 * Apply the icelake PIPE_CHICKEN workarounds for this crtc's pipe via a
 * read-modify-write of the register.
 */
static void icl_set_pipe_chicken(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
5017
/*
 * Switch the FDI link from a training pattern to normal operation after
 * link training has completed, on both the CPU TX and PCH RX side.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		/* IVB uses its own train-none encoding */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT has a separate train pattern field */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_write(dev_priv, reg,
			       intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}
5058
/*
 * The FDI link training functions for ILK/Ibexpeak.
 *
 * Two-phase training: pattern 1 until the RX reports bit lock, then
 * pattern 2 until it reports symbol lock. Each phase is polled up to
 * 5 times; failure is only logged — callers don't get an error back.
 */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	/* poll for bit lock; the IIR bit is write-1-to-clear */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* poll for symbol lock */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");

}
5154
/*
 * FDI TX voltage swing / pre-emphasis combinations tried in order during
 * SNB/IVB link training (see FDI_LINK_TRAIN_VOL_EMP_MASK usage below).
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
5161
/*
 * The FDI link training functions for SNB/Cougarpoint.
 *
 * Same two-phase scheme as ILK (pattern 1 until bit lock, pattern 2
 * until symbol lock), but each phase additionally steps through the
 * snb_b_fdi_train_param[] vswing/pre-emphasis table, retrying the lock
 * poll up to 5 times per setting. Failure is only logged.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* step through the vswing/pre-emphasis table until bit lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* write-1-to-clear the sticky IIR bit */
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* same vswing/pre-emphasis sweep, now waiting for symbol lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
5298
5299 /* Manual link training for Ivy Bridge A0 parts */
5300 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
5301                                       const struct intel_crtc_state *crtc_state)
5302 {
5303         struct drm_device *dev = crtc->base.dev;
5304         struct drm_i915_private *dev_priv = to_i915(dev);
5305         enum pipe pipe = crtc->pipe;
5306         i915_reg_t reg;
5307         u32 temp, i, j;
5308
5309         /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
5310            for train result */
5311         reg = FDI_RX_IMR(pipe);
5312         temp = intel_de_read(dev_priv, reg);
5313         temp &= ~FDI_RX_SYMBOL_LOCK;
5314         temp &= ~FDI_RX_BIT_LOCK;
5315         intel_de_write(dev_priv, reg, temp);
5316
5317         intel_de_posting_read(dev_priv, reg);
5318         udelay(150);
5319
5320         drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
5321                     intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
5322
5323         /* Try each vswing and preemphasis setting twice before moving on */
5324         for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
5325                 /* disable first in case we need to retry */
5326                 reg = FDI_TX_CTL(pipe);
5327                 temp = intel_de_read(dev_priv, reg);
5328                 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
5329                 temp &= ~FDI_TX_ENABLE;
5330                 intel_de_write(dev_priv, reg, temp);
5331
5332                 reg = FDI_RX_CTL(pipe);
5333                 temp = intel_de_read(dev_priv, reg);
5334                 temp &= ~FDI_LINK_TRAIN_AUTO;
5335                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
5336                 temp &= ~FDI_RX_ENABLE;
5337                 intel_de_write(dev_priv, reg, temp);
5338
5339                 /* enable CPU FDI TX and PCH FDI RX */
5340                 reg = FDI_TX_CTL(pipe);
5341                 temp = intel_de_read(dev_priv, reg);
5342                 temp &= ~FDI_DP_PORT_WIDTH_MASK;
5343                 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
5344                 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
5345                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
5346                 temp |= snb_b_fdi_train_param[j/2];
5347                 temp |= FDI_COMPOSITE_SYNC;
5348                 intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
5349
5350                 intel_de_write(dev_priv, FDI_RX_MISC(pipe),
5351                                FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
5352
5353                 reg = FDI_RX_CTL(pipe);
5354                 temp = intel_de_read(dev_priv, reg);
5355                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
5356                 temp |= FDI_COMPOSITE_SYNC;
5357                 intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
5358
5359                 intel_de_posting_read(dev_priv, reg);
5360                 udelay(1); /* should be 0.5us */
5361
5362                 for (i = 0; i < 4; i++) {
5363                         reg = FDI_RX_IIR(pipe);
5364                         temp = intel_de_read(dev_priv, reg);
5365                         drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
5366
5367                         if (temp & FDI_RX_BIT_LOCK ||
5368                             (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
5369                                 intel_de_write(dev_priv, reg,
5370                                                temp | FDI_RX_BIT_LOCK);
5371                                 drm_dbg_kms(&dev_priv->drm,
5372                                             "FDI train 1 done, level %i.\n",
5373                                             i);
5374                                 break;
5375                         }
5376                         udelay(1); /* should be 0.5us */
5377                 }
5378                 if (i == 4) {
5379                         drm_dbg_kms(&dev_priv->drm,
5380                                     "FDI train 1 fail on vswing %d\n", j / 2);
5381                         continue;
5382                 }
5383
5384                 /* Train 2 */
5385                 reg = FDI_TX_CTL(pipe);
5386                 temp = intel_de_read(dev_priv, reg);
5387                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
5388                 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
5389                 intel_de_write(dev_priv, reg, temp);
5390
5391                 reg = FDI_RX_CTL(pipe);
5392                 temp = intel_de_read(dev_priv, reg);
5393                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
5394                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
5395                 intel_de_write(dev_priv, reg, temp);
5396
5397                 intel_de_posting_read(dev_priv, reg);
5398                 udelay(2); /* should be 1.5us */
5399
5400                 for (i = 0; i < 4; i++) {
5401                         reg = FDI_RX_IIR(pipe);
5402                         temp = intel_de_read(dev_priv, reg);
5403                         drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
5404
5405                         if (temp & FDI_RX_SYMBOL_LOCK ||
5406                             (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
5407                                 intel_de_write(dev_priv, reg,
5408                                                temp | FDI_RX_SYMBOL_LOCK);
5409                                 drm_dbg_kms(&dev_priv->drm,
5410                                             "FDI train 2 done, level %i.\n",
5411                                             i);
5412                                 goto train_done;
5413                         }
5414                         udelay(2); /* should be 1.5us */
5415                 }
5416                 if (i == 4)
5417                         drm_dbg_kms(&dev_priv->drm,
5418                                     "FDI train 2 fail on vswing %d\n", j / 2);
5419         }
5420
5421 train_done:
5422         drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
5423 }
5424
/*
 * Enable the FDI PLLs for the given crtc's pipe.
 *
 * Sequence: enable the PCH FDI RX PLL (programming the lane count and
 * mirroring the pipe's BPC from PIPECONF), switch the RX clock source
 * from Rawclk to PCDclk, then enable the CPU FDI TX PLL if it is not
 * already running.  Each step is followed by a posting read plus a
 * delay to let the PLL settle before the next step.
 */
static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	/* clear the lane count and the 18:16 BPC field before re-programming */
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* copy PIPECONF's BPC field into the FDI RX BPC field cleared above */
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(100);
	}
}
5461
/*
 * Disable the FDI PLLs for the given crtc's pipe, reversing
 * ilk_fdi_pll_enable(): switch FDI RX back from PCDclk to Rawclk,
 * disable the CPU FDI TX PLL, then disable the PCH FDI RX PLL,
 * waiting for the clocks to settle after each PLL disable.
 */
static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}
5491
/*
 * Disable the CPU FDI transmitter and PCH FDI receiver for the given
 * crtc's pipe, leaving both sides parked on training pattern 1 so a
 * subsequent re-enable can start link training from a known state.
 */
static void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	/* refresh the 18:16 BPC field from PIPECONF while disabling RX */
	temp &= ~(0x7 << 16);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT PCH uses a different train-pattern field layout */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}
5543
5544 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
5545 {
5546         struct drm_crtc *crtc;
5547         bool cleanup_done;
5548
5549         drm_for_each_crtc(crtc, &dev_priv->drm) {
5550                 struct drm_crtc_commit *commit;
5551                 spin_lock(&crtc->commit_lock);
5552                 commit = list_first_entry_or_null(&crtc->commit_list,
5553                                                   struct drm_crtc_commit, commit_entry);
5554                 cleanup_done = commit ?
5555                         try_wait_for_completion(&commit->cleanup_done) : true;
5556                 spin_unlock(&crtc->commit_lock);
5557
5558                 if (cleanup_done)
5559                         continue;
5560
5561                 drm_crtc_wait_one_vblank(crtc);
5562
5563                 return true;
5564         }
5565
5566         return false;
5567 }
5568
/*
 * Disable the LPT iCLKIP clock: gate the pixel clock, then set the
 * SSC control disable bit via the sideband interface.  The sideband
 * access is serialized with sb_lock, matching lpt_program_iclkip().
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
5583
/*
 * Program iCLKIP clock to the desired frequency.
 *
 * The clock is first disabled, the divider/phase-increment pair is
 * computed from the mode's pixel clock, the result is written over
 * the sideband interface, and finally the modulator and pixel clock
 * are re-enabled.
 */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* The modulator must be off while it is reprogrammed. */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		/* Split total divisor into integer part and phase increment. */
		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	drm_dbg_kms(&dev_priv->drm,
		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		    clock, auxdiv, divsel, phasedir, phaseinc);

	/* All iCLKIP registers are accessed over the sideband interface. */
	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
5659
/*
 * Read back the currently programmed iCLKIP frequency (in KHz),
 * inverting the divisor computation done in lpt_program_iclkip().
 * Returns 0 if the pixel clock is gated or the modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* Reassemble the total divisor from its divsel/phaseinc split. */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
5696
/*
 * Copy the CPU transcoder's horizontal and vertical timing registers
 * to the given PCH transcoder so both sides of the link agree on the
 * mode timings.
 */
static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
					   enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
		       intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
		       intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
		       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));

	intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
		       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
		       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
		       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
	intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
}
5720
/*
 * Set or clear the FDI B/C lane bifurcation bit in SOUTH_CHICKEN1.
 * Returns early if the bit already has the requested value.  The FDI
 * receivers for pipes B and C must both be disabled while the bit is
 * flipped, which the WARNs below check.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
		    FDI_RX_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
		    FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}
5745
5746 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5747 {
5748         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5749         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5750
5751         switch (crtc->pipe) {
5752         case PIPE_A:
5753                 break;
5754         case PIPE_B:
5755                 if (crtc_state->fdi_lanes > 2)
5756                         cpt_set_fdi_bc_bifurcation(dev_priv, false);
5757                 else
5758                         cpt_set_fdi_bc_bifurcation(dev_priv, true);
5759
5760                 break;
5761         case PIPE_C:
5762                 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5763
5764                 break;
5765         default:
5766                 BUG();
5767         }
5768 }
5769
5770 /*
5771  * Finds the encoder associated with the given CRTC. This can only be
5772  * used when we know that the CRTC isn't feeding multiple encoders!
5773  */
5774 static struct intel_encoder *
5775 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5776                            const struct intel_crtc_state *crtc_state)
5777 {
5778         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5779         const struct drm_connector_state *connector_state;
5780         const struct drm_connector *connector;
5781         struct intel_encoder *encoder = NULL;
5782         int num_encoders = 0;
5783         int i;
5784
5785         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5786                 if (connector_state->crtc != &crtc->base)
5787                         continue;
5788
5789                 encoder = to_intel_encoder(connector_state->best_encoder);
5790                 num_encoders++;
5791         }
5792
5793         drm_WARN(encoder->base.dev, num_encoders != 1,
5794                  "%d encoders for pipe %c\n",
5795                  num_encoders, pipe_name(crtc->pipe));
5796
5797         return encoder;
5798 }
5799
5800 /*
5801  * Enable PCH resources required for PCH ports:
5802  *   - PCH PLLs
5803  *   - FDI training & RX/TX
5804  *   - update transcoder timings
5805  *   - DP transcoding bits
5806  *   - transcoder
5807  */
5808 static void ilk_pch_enable(const struct intel_atomic_state *state,
5809                            const struct intel_crtc_state *crtc_state)
5810 {
5811         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5812         struct drm_device *dev = crtc->base.dev;
5813         struct drm_i915_private *dev_priv = to_i915(dev);
5814         enum pipe pipe = crtc->pipe;
5815         u32 temp;
5816
5817         assert_pch_transcoder_disabled(dev_priv, pipe);
5818
5819         if (IS_IVYBRIDGE(dev_priv))
5820                 ivb_update_fdi_bc_bifurcation(crtc_state);
5821
5822         /* Write the TU size bits before fdi link training, so that error
5823          * detection works. */
5824         intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
5825                        intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
5826
5827         /* For PCH output, training FDI link */
5828         dev_priv->display.fdi_link_train(crtc, crtc_state);
5829
5830         /* We need to program the right clock selection before writing the pixel
5831          * mutliplier into the DPLL. */
5832         if (HAS_PCH_CPT(dev_priv)) {
5833                 u32 sel;
5834
5835                 temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
5836                 temp |= TRANS_DPLL_ENABLE(pipe);
5837                 sel = TRANS_DPLLB_SEL(pipe);
5838                 if (crtc_state->shared_dpll ==
5839                     intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
5840                         temp |= sel;
5841                 else
5842                         temp &= ~sel;
5843                 intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
5844         }
5845
5846         /* XXX: pch pll's can be enabled any time before we enable the PCH
5847          * transcoder, and we actually should do this to not upset any PCH
5848          * transcoder that already use the clock when we share it.
5849          *
5850          * Note that enable_shared_dpll tries to do the right thing, but
5851          * get_shared_dpll unconditionally resets the pll - we need that to have
5852          * the right LVDS enable sequence. */
5853         intel_enable_shared_dpll(crtc_state);
5854
5855         /* set transcoder timing, panel must allow it */
5856         assert_panel_unlocked(dev_priv, pipe);
5857         ilk_pch_transcoder_set_timings(crtc_state, pipe);
5858
5859         intel_fdi_normal_train(crtc);
5860
5861         /* For PCH DP, enable TRANS_DP_CTL */
5862         if (HAS_PCH_CPT(dev_priv) &&
5863             intel_crtc_has_dp_encoder(crtc_state)) {
5864                 const struct drm_display_mode *adjusted_mode =
5865                         &crtc_state->hw.adjusted_mode;
5866                 u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
5867                 i915_reg_t reg = TRANS_DP_CTL(pipe);
5868                 enum port port;
5869
5870                 temp = intel_de_read(dev_priv, reg);
5871                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
5872                           TRANS_DP_SYNC_MASK |
5873                           TRANS_DP_BPC_MASK);
5874                 temp |= TRANS_DP_OUTPUT_ENABLE;
5875                 temp |= bpc << 9; /* same format but at 11:9 */
5876
5877                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
5878                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
5879                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
5880                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
5881
5882                 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
5883                 drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
5884                 temp |= TRANS_DP_PORT_SEL(port);
5885
5886                 intel_de_write(dev_priv, reg, temp);
5887         }
5888
5889         ilk_enable_pch_transcoder(crtc_state);
5890 }
5891
/*
 * Enable the LPT PCH resources for the given crtc: program the iCLKIP
 * clock, copy the transcoder timings to PCH transcoder A (the only one
 * used on LPT, hence PIPE_A below), and enable the PCH transcoder.
 */
void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, PIPE_A);

	lpt_program_iclkip(crtc_state);

	/* Set transcoder timing. */
	ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
5907
/*
 * Post-modeset sanity check on CPT: the pipe's scanline counter
 * (PIPEDSL) should be advancing.  Sample the counter, then wait (with
 * one retry) for it to change; log an error if it stays stuck.
 */
static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = intel_de_read(dev_priv, dslreg);
	udelay(500);
	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
			drm_err(&dev_priv->drm,
				"mode set failed: pipe %c stuck\n",
				pipe_name(pipe));
	}
}
5923
5924 /*
5925  * The hardware phase 0.0 refers to the center of the pixel.
5926  * We want to start from the top/left edge which is phase
5927  * -0.5. That matches how the hardware calculates the scaling
5928  * factors (from top-left of the first pixel to bottom-right
5929  * of the last pixel, as opposed to the pixel centers).
5930  *
5931  * For 4:2:0 subsampled chroma planes we obviously have to
5932  * adjust that so that the chroma sample position lands in
5933  * the right spot.
5934  *
5935  * Note that for packed YCbCr 4:2:2 formats there is no way to
5936  * control chroma siting. The hardware simply replicates the
5937  * chroma samples for both of the luma samples, and thus we don't
5938  * actually get the expected MPEG2 chroma siting convention :(
5939  * The same behaviour is observed on pre-SKL platforms as well.
5940  *
5941  * Theory behind the formula (note that we ignore sub-pixel
5942  * source coordinates):
5943  * s = source sample position
5944  * d = destination sample position
5945  *
5946  * Downscaling 4:1:
5947  * -0.5
5948  * | 0.0
5949  * | |     1.5 (initial phase)
5950  * | |     |
5951  * v v     v
5952  * | s | s | s | s |
5953  * |       d       |
5954  *
5955  * Upscaling 1:4:
5956  * -0.5
5957  * | -0.375 (initial phase)
5958  * | |     0.0
5959  * | |     |
5960  * v v     v
5961  * |       s       |
5962  * | d | d | d | d |
5963  */
5964 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5965 {
5966         int phase = -0x8000;
5967         u16 trip = 0;
5968
5969         if (chroma_cosited)
5970                 phase += (sub - 1) * 0x8000 / sub;
5971
5972         phase += scale / (2 * sub);
5973
5974         /*
5975          * Hardware initial phase limited to [-0.5:1.5].
5976          * Since the max hardware scale factor is 3.0, we
5977          * should never actually excdeed 1.0 here.
5978          */
5979         WARN_ON(phase < -0x8000 || phase > 0x18000);
5980
5981         if (phase < 0)
5982                 phase = 0x10000 + phase;
5983         else
5984                 trip = PS_PHASE_TRIP;
5985
5986         return ((phase >> 2) & PS_PHASE_MASK) | trip;
5987 }
5988
5989 #define SKL_MIN_SRC_W 8
5990 #define SKL_MAX_SRC_W 4096
5991 #define SKL_MIN_SRC_H 8
5992 #define SKL_MAX_SRC_H 4096
5993 #define SKL_MIN_DST_W 8
5994 #define SKL_MAX_DST_W 4096
5995 #define SKL_MIN_DST_H 8
5996 #define SKL_MAX_DST_H 4096
5997 #define ICL_MAX_SRC_W 5120
5998 #define ICL_MAX_SRC_H 4096
5999 #define ICL_MAX_DST_W 5120
6000 #define ICL_MAX_DST_H 4096
6001 #define SKL_MIN_YUV_420_SRC_W 16
6002 #define SKL_MIN_YUV_420_SRC_H 16
6003
/*
 * skl_update_scaler() - stage a scaler allocation or release for one
 * scaler user (the crtc itself or one of its planes) in @crtc_state's
 * scaler_state.
 *
 * @crtc_state: crtc state whose scaler bookkeeping is updated
 * @force_detach: release the scaler regardless of need
 * @scaler_user: index identifying the user within scaler_users
 * @scaler_id: in/out; currently assigned scaler id, set to -1 on release
 * @src_w, @src_h: source rectangle dimensions
 * @dst_w, @dst_h: destination rectangle dimensions
 * @format: framebuffer format (NULL for crtc/pipe scaling)
 * @modifier: framebuffer modifier
 * @need_scaler: caller already determined a scaler is required
 *
 * Only the scaler_users/in_use bookkeeping is touched here; the actual
 * scaler registers are programmed later by plane/panel-fitter code.
 *
 * Returns 0 on success, -EINVAL if the requested scaling is unsupported.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h,
		  const struct drm_format_info *format,
		  u64 modifier, bool need_scaler)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	if (src_w != dst_w || src_h != dst_h)
		need_scaler = true;

	/*
	 * Scaling/fitting not supported in IF-ID mode in GEN9+
	 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
	 * Once NV12 is enabled, handle it here while allocating scaler
	 * for NV12.
	 */
	if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
	    need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		drm_dbg_kms(&dev_priv->drm,
			    "Pipe/Plane scaling not supported with IF-ID mode\n");
		return -EINVAL;
	}

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler binded to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaler) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			drm_dbg_kms(&dev_priv->drm,
				    "scaler_user index %u.%u: "
				    "Staged freeing scaler id %d scaler_users = 0x%x\n",
				    intel_crtc->pipe, scaler_user, *scaler_id,
				    scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* planar YUV sources have a hardware minimum size when scaled */
	if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
	    (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Planar YUV: src dimensions not met\n");
		return -EINVAL;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
	    (INTEL_GEN(dev_priv) >= 11 &&
	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
	    (INTEL_GEN(dev_priv) < 11 &&
	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
		drm_dbg_kms(&dev_priv->drm,
			    "scaler_user index %u.%u: src %ux%u dst %ux%u "
			    "size is out of scaler range\n",
			    intel_crtc->pipe, scaler_user, src_w, src_h,
			    dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
		    "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		    intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		    scaler_state->scaler_users);

	return 0;
}
6098
6099 static int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
6100 {
6101         const struct drm_display_mode *adjusted_mode =
6102                 &crtc_state->hw.adjusted_mode;
6103         int width, height;
6104
6105         if (crtc_state->pch_pfit.enabled) {
6106                 width = drm_rect_width(&crtc_state->pch_pfit.dst);
6107                 height = drm_rect_height(&crtc_state->pch_pfit.dst);
6108         } else {
6109                 width = adjusted_mode->crtc_hdisplay;
6110                 height = adjusted_mode->crtc_vdisplay;
6111         }
6112
6113         return skl_update_scaler(crtc_state, !crtc_state->hw.active,
6114                                  SKL_CRTC_INDEX,
6115                                  &crtc_state->scaler_state.scaler_id,
6116                                  crtc_state->pipe_src_w, crtc_state->pipe_src_h,
6117                                  width, height, NULL, 0,
6118                                  crtc_state->pch_pfit.enabled);
6119 }
6120
/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 * @crtc_state: crtc's scaler state
 * @plane_state: atomic plane state to update
 *
 * Stages (or detaches) a scaler request for @plane_state in @crtc_state's
 * scaler state, then validates that the plane's color key and pixel format
 * are compatible with scaling.
 *
 * Return
 *     0 - scaler_usage updated successfully
 *    error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{
	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	int ret;
	/* No fb or invisible plane -> release any scaler it holds. */
	bool force_detach = !fb || !plane_state->uapi.visible;
	bool need_scaler = false;

	/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
	if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
	    fb && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
		need_scaler = true;

	/* src coordinates are 16.16 fixed point, hence the >> 16. */
	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				drm_rect_width(&plane_state->uapi.src) >> 16,
				drm_rect_height(&plane_state->uapi.src) >> 16,
				drm_rect_width(&plane_state->uapi.dst),
				drm_rect_height(&plane_state->uapi.dst),
				fb ? fb->format : NULL,
				fb ? fb->modifier : 0,
				need_scaler);

	/* Error, or no scaler assigned: nothing further to validate. */
	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags) {
		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] scaling with color key not allowed",
			    intel_plane->base.base.id,
			    intel_plane->base.name);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->format->format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_ABGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_XYUV8888:
	case DRM_FORMAT_P010:
	case DRM_FORMAT_P012:
	case DRM_FORMAT_P016:
	case DRM_FORMAT_Y210:
	case DRM_FORMAT_Y212:
	case DRM_FORMAT_Y216:
	case DRM_FORMAT_XVYU2101010:
	case DRM_FORMAT_XVYU12_16161616:
	case DRM_FORMAT_XVYU16161616:
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		/* FP16 formats can only be scaled on gen11+. */
		if (INTEL_GEN(dev_priv) >= 11)
			break;
		/* fall through */
	default:
		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
			    intel_plane->base.base.id, intel_plane->base.name,
			    fb->base.id, fb->format->format);
		return -EINVAL;
	}

	return 0;
}
6213
6214 void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
6215 {
6216         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
6217         int i;
6218
6219         for (i = 0; i < crtc->num_scalers; i++)
6220                 skl_detach_scaler(crtc, i);
6221 }
6222
/*
 * Program the SKL+ pipe scaler assigned for panel fitting: scale the
 * full pipe source rectangle to the pch_pfit destination window.
 */
static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	/* Source is the whole pipe, expressed in 16.16 fixed point. */
	struct drm_rect src = {
		.x2 = crtc_state->pipe_src_w << 16,
		.y2 = crtc_state->pipe_src_h << 16,
	};
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	u16 uv_rgb_hphase, uv_rgb_vphase;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;
	int hscale, vscale;
	unsigned long irqflags;
	int id;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/* The atomic check phase must have assigned a scaler already. */
	if (drm_WARN_ON(&dev_priv->drm,
			crtc_state->scaler_state.scaler_id < 0))
		return;

	hscale = drm_rect_calc_hscale(&src, dst, 0, INT_MAX);
	vscale = drm_rect_calc_vscale(&src, dst, 0, INT_MAX);

	uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
	uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

	id = scaler_state->scaler_id;

	/* The _fw register writes below are done under the uncore lock. */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			  PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
	intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
			  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
	intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
			  PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
	intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
			  x << 16 | y);
	intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
			  width << 16 | height);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
6274
6275 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
6276 {
6277         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6278         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6279         const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
6280         enum pipe pipe = crtc->pipe;
6281         int width = drm_rect_width(dst);
6282         int height = drm_rect_height(dst);
6283         int x = dst->x1;
6284         int y = dst->y1;
6285
6286         if (!crtc_state->pch_pfit.enabled)
6287                 return;
6288
6289         /* Force use of hard-coded filter coefficients
6290          * as some pre-programmed values are broken,
6291          * e.g. x201.
6292          */
6293         if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
6294                 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
6295                                PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
6296         else
6297                 intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
6298                                PF_FILTER_MED_3x3);
6299         intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
6300         intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
6301 }
6302
/*
 * Enable IPS (Intermediate Pixel Storage): via the pcode mailbox on BDW,
 * via the IPS_CTL register directly otherwise.
 */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
							 IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS enable\n");
	}
}
6339
/*
 * Disable IPS, using the pcode mailbox on BDW and IPS_CTL elsewhere,
 * then wait a vblank so planes can safely be disabled afterwards.
 */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(dev,
			    sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			drm_err(&dev_priv->drm,
				"Timed out waiting for IPS disable\n");
	} else {
		intel_de_write(dev_priv, IPS_CTL, 0);
		/* Posting read flushes the disable to the hardware. */
		intel_de_posting_read(dev_priv, IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
6368
6369 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
6370 {
6371         if (intel_crtc->overlay)
6372                 (void) intel_overlay_switch_off(intel_crtc->overlay);
6373
6374         /* Let userspace switch the overlay on again. In most cases userspace
6375          * has to recompute where to put it anyway.
6376          */
6377 }
6378
6379 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
6380                                        const struct intel_crtc_state *new_crtc_state)
6381 {
6382         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6383         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6384
6385         if (!old_crtc_state->ips_enabled)
6386                 return false;
6387
6388         if (needs_modeset(new_crtc_state))
6389                 return true;
6390
6391         /*
6392          * Workaround : Do not read or write the pipe palette/gamma data while
6393          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6394          *
6395          * Disable IPS before we program the LUT.
6396          */
6397         if (IS_HASWELL(dev_priv) &&
6398             (new_crtc_state->uapi.color_mgmt_changed ||
6399              new_crtc_state->update_pipe) &&
6400             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6401                 return true;
6402
6403         return !new_crtc_state->ips_enabled;
6404 }
6405
6406 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
6407                                        const struct intel_crtc_state *new_crtc_state)
6408 {
6409         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6410         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6411
6412         if (!new_crtc_state->ips_enabled)
6413                 return false;
6414
6415         if (needs_modeset(new_crtc_state))
6416                 return true;
6417
6418         /*
6419          * Workaround : Do not read or write the pipe palette/gamma data while
6420          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6421          *
6422          * Re-enable IPS after the LUT has been programmed.
6423          */
6424         if (IS_HASWELL(dev_priv) &&
6425             (new_crtc_state->uapi.color_mgmt_changed ||
6426              new_crtc_state->update_pipe) &&
6427             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
6428                 return true;
6429
6430         /*
6431          * We can't read out IPS on broadwell, assume the worst and
6432          * forcibly enable IPS on the first fastset.
6433          */
6434         if (new_crtc_state->update_pipe && old_crtc_state->inherited)
6435                 return true;
6436
6437         return !old_crtc_state->ips_enabled;
6438 }
6439
6440 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
6441 {
6442         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6443
6444         if (!crtc_state->nv12_planes)
6445                 return false;
6446
6447         /* WA Display #0827: Gen9:all */
6448         if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
6449                 return true;
6450
6451         return false;
6452 }
6453
6454 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
6455 {
6456         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
6457
6458         /* Wa_2006604312:icl,ehl */
6459         if (crtc_state->scaler_state.scaler_users > 0 && IS_GEN(dev_priv, 11))
6460                 return true;
6461
6462         return false;
6463 }
6464
6465 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
6466                             const struct intel_crtc_state *new_crtc_state)
6467 {
6468         return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) &&
6469                 new_crtc_state->active_planes;
6470 }
6471
6472 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
6473                              const struct intel_crtc_state *new_crtc_state)
6474 {
6475         return old_crtc_state->active_planes &&
6476                 (!new_crtc_state->active_planes || needs_modeset(new_crtc_state));
6477 }
6478
/*
 * Per-crtc work that must run after the plane update has taken effect:
 * frontbuffer flip, optimal watermarks, IPS re-enable, FBC update, and
 * tearing down workarounds the new state no longer needs.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(crtc);

	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
		hsw_enable_ips(new_crtc_state);

	intel_fbc_post_update(state, crtc);

	/* Display WA #0827: undo once it's no longer needed */
	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	/* Wa_2006604312:icl,ehl - undo once no scalers are in use */
	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);
}
6507
/*
 * Per-crtc work that must run before the planes are updated: IPS/FBC
 * disable, enabling workarounds the new state requires, self-refresh and
 * LP watermark disables (with their mandatory vblank waits), programming
 * intermediate watermarks, and gen2 underrun-reporting suppression.
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
		hsw_disable_ips(old_crtc_state);

	/* FBC pre-update may request a vblank wait before proceeding. */
	if (intel_fbc_pre_update(state, crtc))
		intel_wait_for_vblank(dev_priv, pipe);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks.  For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (dev_priv->display.initial_watermarks)
			dev_priv->display.initial_watermarks(state, crtc);
		else if (new_crtc_state->update_wm_pre)
			intel_update_watermarks(crtc);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (IS_GEN(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
6594
6595 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
6596                                       struct intel_crtc *crtc)
6597 {
6598         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6599         const struct intel_crtc_state *new_crtc_state =
6600                 intel_atomic_get_new_crtc_state(state, crtc);
6601         unsigned int update_mask = new_crtc_state->update_planes;
6602         const struct intel_plane_state *old_plane_state;
6603         struct intel_plane *plane;
6604         unsigned fb_bits = 0;
6605         int i;
6606
6607         intel_crtc_dpms_overlay_disable(crtc);
6608
6609         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
6610                 if (crtc->pipe != plane->pipe ||
6611                     !(update_mask & BIT(plane->id)))
6612                         continue;
6613
6614                 intel_disable_plane(plane, new_crtc_state);
6615
6616                 if (old_plane_state->uapi.visible)
6617                         fb_bits |= plane->frontbuffer_bit;
6618         }
6619
6620         intel_frontbuffer_flip(dev_priv, fb_bits);
6621 }
6622
/**
 * intel_connector_primary_encoder - get the primary encoder for a connector
 * @connector: connector for which to return the encoder
 *
 * Returns the primary encoder for a connector. There is a 1:1 mapping from
 * all connectors to their encoder, except for DP-MST connectors which have
 * both a virtual and a primary encoder. These DP-MST primary encoders can be
 * pointed to by as many DP-MST connectors as there are pipes.
 */
static struct intel_encoder *
intel_connector_primary_encoder(struct intel_connector *connector)
{
	struct intel_encoder *encoder;

	/* DP-MST: the primary encoder is the one of the digital port. */
	if (connector->mst_port)
		return &dp_to_dig_port(connector->mst_port)->base;

	encoder = intel_attached_encoder(connector);
	drm_WARN_ON(connector->base.dev, !encoder);

	return encoder;
}
6645
6646 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
6647 {
6648         struct drm_connector_state *new_conn_state;
6649         struct drm_connector *connector;
6650         int i;
6651
6652         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
6653                                         i) {
6654                 struct intel_connector *intel_connector;
6655                 struct intel_encoder *encoder;
6656                 struct intel_crtc *crtc;
6657
6658                 if (!intel_connector_needs_modeset(state, connector))
6659                         continue;
6660
6661                 intel_connector = to_intel_connector(connector);
6662                 encoder = intel_connector_primary_encoder(intel_connector);
6663                 if (!encoder->update_prepare)
6664                         continue;
6665
6666                 crtc = new_conn_state->crtc ?
6667                         to_intel_crtc(new_conn_state->crtc) : NULL;
6668                 encoder->update_prepare(state, encoder, crtc);
6669         }
6670 }
6671
6672 static void intel_encoders_update_complete(struct intel_atomic_state *state)
6673 {
6674         struct drm_connector_state *new_conn_state;
6675         struct drm_connector *connector;
6676         int i;
6677
6678         for_each_new_connector_in_state(&state->base, connector, new_conn_state,
6679                                         i) {
6680                 struct intel_connector *intel_connector;
6681                 struct intel_encoder *encoder;
6682                 struct intel_crtc *crtc;
6683
6684                 if (!intel_connector_needs_modeset(state, connector))
6685                         continue;
6686
6687                 intel_connector = to_intel_connector(connector);
6688                 encoder = intel_connector_primary_encoder(intel_connector);
6689                 if (!encoder->update_complete)
6690                         continue;
6691
6692                 crtc = new_conn_state->crtc ?
6693                         to_intel_crtc(new_conn_state->crtc) : NULL;
6694                 encoder->update_complete(state, encoder, crtc);
6695         }
6696 }
6697
6698 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
6699                                           struct intel_crtc *crtc)
6700 {
6701         const struct intel_crtc_state *crtc_state =
6702                 intel_atomic_get_new_crtc_state(state, crtc);
6703         const struct drm_connector_state *conn_state;
6704         struct drm_connector *conn;
6705         int i;
6706
6707         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6708                 struct intel_encoder *encoder =
6709                         to_intel_encoder(conn_state->best_encoder);
6710
6711                 if (conn_state->crtc != &crtc->base)
6712                         continue;
6713
6714                 if (encoder->pre_pll_enable)
6715                         encoder->pre_pll_enable(state, encoder,
6716                                                 crtc_state, conn_state);
6717         }
6718 }
6719
6720 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
6721                                       struct intel_crtc *crtc)
6722 {
6723         const struct intel_crtc_state *crtc_state =
6724                 intel_atomic_get_new_crtc_state(state, crtc);
6725         const struct drm_connector_state *conn_state;
6726         struct drm_connector *conn;
6727         int i;
6728
6729         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6730                 struct intel_encoder *encoder =
6731                         to_intel_encoder(conn_state->best_encoder);
6732
6733                 if (conn_state->crtc != &crtc->base)
6734                         continue;
6735
6736                 if (encoder->pre_enable)
6737                         encoder->pre_enable(state, encoder,
6738                                             crtc_state, conn_state);
6739         }
6740 }
6741
6742 static void intel_encoders_enable(struct intel_atomic_state *state,
6743                                   struct intel_crtc *crtc)
6744 {
6745         const struct intel_crtc_state *crtc_state =
6746                 intel_atomic_get_new_crtc_state(state, crtc);
6747         const struct drm_connector_state *conn_state;
6748         struct drm_connector *conn;
6749         int i;
6750
6751         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6752                 struct intel_encoder *encoder =
6753                         to_intel_encoder(conn_state->best_encoder);
6754
6755                 if (conn_state->crtc != &crtc->base)
6756                         continue;
6757
6758                 if (encoder->enable)
6759                         encoder->enable(state, encoder,
6760                                         crtc_state, conn_state);
6761                 intel_opregion_notify_encoder(encoder, true);
6762         }
6763 }
6764
6765 static void intel_encoders_disable(struct intel_atomic_state *state,
6766                                    struct intel_crtc *crtc)
6767 {
6768         const struct intel_crtc_state *old_crtc_state =
6769                 intel_atomic_get_old_crtc_state(state, crtc);
6770         const struct drm_connector_state *old_conn_state;
6771         struct drm_connector *conn;
6772         int i;
6773
6774         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6775                 struct intel_encoder *encoder =
6776                         to_intel_encoder(old_conn_state->best_encoder);
6777
6778                 if (old_conn_state->crtc != &crtc->base)
6779                         continue;
6780
6781                 intel_opregion_notify_encoder(encoder, false);
6782                 if (encoder->disable)
6783                         encoder->disable(state, encoder,
6784                                          old_crtc_state, old_conn_state);
6785         }
6786 }
6787
6788 static void intel_encoders_post_disable(struct intel_atomic_state *state,
6789                                         struct intel_crtc *crtc)
6790 {
6791         const struct intel_crtc_state *old_crtc_state =
6792                 intel_atomic_get_old_crtc_state(state, crtc);
6793         const struct drm_connector_state *old_conn_state;
6794         struct drm_connector *conn;
6795         int i;
6796
6797         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6798                 struct intel_encoder *encoder =
6799                         to_intel_encoder(old_conn_state->best_encoder);
6800
6801                 if (old_conn_state->crtc != &crtc->base)
6802                         continue;
6803
6804                 if (encoder->post_disable)
6805                         encoder->post_disable(state, encoder,
6806                                               old_crtc_state, old_conn_state);
6807         }
6808 }
6809
6810 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
6811                                             struct intel_crtc *crtc)
6812 {
6813         const struct intel_crtc_state *old_crtc_state =
6814                 intel_atomic_get_old_crtc_state(state, crtc);
6815         const struct drm_connector_state *old_conn_state;
6816         struct drm_connector *conn;
6817         int i;
6818
6819         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6820                 struct intel_encoder *encoder =
6821                         to_intel_encoder(old_conn_state->best_encoder);
6822
6823                 if (old_conn_state->crtc != &crtc->base)
6824                         continue;
6825
6826                 if (encoder->post_pll_disable)
6827                         encoder->post_pll_disable(state, encoder,
6828                                                   old_crtc_state, old_conn_state);
6829         }
6830 }
6831
6832 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
6833                                        struct intel_crtc *crtc)
6834 {
6835         const struct intel_crtc_state *crtc_state =
6836                 intel_atomic_get_new_crtc_state(state, crtc);
6837         const struct drm_connector_state *conn_state;
6838         struct drm_connector *conn;
6839         int i;
6840
6841         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6842                 struct intel_encoder *encoder =
6843                         to_intel_encoder(conn_state->best_encoder);
6844
6845                 if (conn_state->crtc != &crtc->base)
6846                         continue;
6847
6848                 if (encoder->update_pipe)
6849                         encoder->update_pipe(state, encoder,
6850                                              crtc_state, conn_state);
6851         }
6852 }
6853
6854 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6855 {
6856         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6857         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6858
6859         plane->disable_plane(plane, crtc_state);
6860 }
6861
/*
 * Enable a pipe on ILK-family (PCH split) hardware.
 *
 * Programs the pipe timings, source size, M/N values and PIPECONF, runs
 * the encoder pre-enable hooks, sets up the FDI PLL / panel fitter / LUTs,
 * and finally enables the pipe, the PCH side (when a PCH encoder is
 * present) and the encoders. FIFO underrun reporting is suppressed for
 * the whole sequence and re-enabled at the end (see comment below).
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* The pipe must be off before we can enable it. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (new_crtc_state->has_pch_encoder)
		intel_prepare_shared_dpll(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	/* FDI link M/N is only needed when driving a PCH port. */
	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	ilk_set_pipeconf(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ilk_fdi_pll_enable(new_crtc_state);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	intel_enable_pipe(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev_priv, pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6953
6954 /* IPS only exists on ULT machines and is tied to pipe A. */
6955 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
6956 {
6957         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
6958 }
6959
6960 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6961                                             enum pipe pipe, bool apply)
6962 {
6963         u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
6964         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6965
6966         if (apply)
6967                 val |= mask;
6968         else
6969                 val &= ~mask;
6970
6971         intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
6972 }
6973
6974 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
6975 {
6976         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6977         enum pipe pipe = crtc->pipe;
6978         u32 val;
6979
6980         val = MBUS_DBOX_A_CREDIT(2);
6981
6982         if (INTEL_GEN(dev_priv) >= 12) {
6983                 val |= MBUS_DBOX_BW_CREDIT(2);
6984                 val |= MBUS_DBOX_B_CREDIT(12);
6985         } else {
6986                 val |= MBUS_DBOX_BW_CREDIT(1);
6987                 val |= MBUS_DBOX_B_CREDIT(8);
6988         }
6989
6990         intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
6991 }
6992
6993 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
6994 {
6995         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6996         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6997
6998         intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
6999                        HSW_LINETIME(crtc_state->linetime) |
7000                        HSW_IPS_LINETIME(crtc_state->ips_linetime));
7001 }
7002
7003 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
7004 {
7005         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7006         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7007         i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
7008         u32 val;
7009
7010         val = intel_de_read(dev_priv, reg);
7011         val &= ~HSW_FRAME_START_DELAY_MASK;
7012         val |= HSW_FRAME_START_DELAY(0);
7013         intel_de_write(dev_priv, reg, val);
7014 }
7015
/*
 * Enable a pipe on HSW+ hardware.
 *
 * Runs the encoder pre-PLL/pre-enable hooks around shared DPLL enable,
 * programs the transcoder (timings, source size, pixel multiplier, M/N,
 * PIPECONF/PIPEMISC), the scaler/panel fitter and the LUTs, then enables
 * MBus credits (gen11+), the encoders and applies the GLK/CNL scaler
 * clock gating and HSW dual-pipe workarounds.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	/* The pipe must be off before we can enable it. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	intel_encoders_pre_pll_enable(state, crtc);

	if (new_crtc_state->shared_dpll)
		intel_enable_shared_dpll(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	/* DSI transcoders program their own timings. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder))
		intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
			       new_crtc_state->pixel_multiplier - 1);

	/* FDI link M/N is only needed when driving a PCH port. */
	if (new_crtc_state->has_pch_encoder)
		intel_cpu_transcoder_set_m_n(new_crtc_state,
					     &new_crtc_state->fdi_m_n, NULL);

	if (!transcoder_is_dsi(cpu_transcoder)) {
		hsw_set_frame_start_delay(new_crtc_state);
		hsw_set_pipeconf(new_crtc_state);
	}

	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (INTEL_GEN(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(crtc);

	intel_encoders_enable(state, crtc);

	/* WA #1180: undo the clock gating disable after one vblank. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
7107
7108 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
7109 {
7110         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
7111         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7112         enum pipe pipe = crtc->pipe;
7113
7114         /* To avoid upsetting the power well on haswell only disable the pfit if
7115          * it's in use. The hw state code will make sure we get this right. */
7116         if (!old_crtc_state->pch_pfit.enabled)
7117                 return;
7118
7119         intel_de_write(dev_priv, PF_CTL(pipe), 0);
7120         intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
7121         intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
7122 }
7123
/*
 * Disable a pipe on ILK-family (PCH split) hardware.
 *
 * Runs the encoder disable hooks around pipe/panel-fitter/FDI teardown,
 * and when a PCH encoder was in use also disables the PCH transcoder,
 * TRANS_DP_CTL/DPLL_SEL (on CPT) and the FDI PLL. FIFO underrun
 * reporting is suppressed for the whole sequence (see comment below).
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_fdi_disable(crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder) {
		ilk_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = intel_de_read(dev_priv, reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			intel_de_write(dev_priv, reg, temp);

			/* disable DPLL_SEL */
			temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
		}

		ilk_fdi_pll_disable(crtc);
	}

	/* Re-enable underrun reporting now that everything is off. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
7180
/*
 * Disable a pipe on HSW+ hardware. Unlike the ILK/i9xx paths, only the
 * encoder ->disable()/->post_disable() hooks are invoked here —
 * presumably those hooks perform the remaining pipe/transcoder teardown
 * (see the FIXME below).
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	intel_encoders_disable(state, crtc);
	intel_encoders_post_disable(state, crtc);
}
7191
7192 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
7193 {
7194         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7195         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7196
7197         if (!crtc_state->gmch_pfit.control)
7198                 return;
7199
7200         /*
7201          * The panel fitter should only be adjusted whilst the pipe is disabled,
7202          * according to register description and PRM.
7203          */
7204         drm_WARN_ON(&dev_priv->drm,
7205                     intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
7206         assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
7207
7208         intel_de_write(dev_priv, PFIT_PGM_RATIOS,
7209                        crtc_state->gmch_pfit.pgm_ratios);
7210         intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
7211
7212         /* Border color in case we don't scale up to the full screen. Black by
7213          * default, change to something else for debugging. */
7214         intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
7215 }
7216
7217 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
7218 {
7219         if (phy == PHY_NONE)
7220                 return false;
7221         else if (IS_ROCKETLAKE(dev_priv))
7222                 return phy <= PHY_D;
7223         else if (IS_ELKHARTLAKE(dev_priv))
7224                 return phy <= PHY_C;
7225         else if (INTEL_GEN(dev_priv) >= 11)
7226                 return phy <= PHY_B;
7227         else
7228                 return false;
7229 }
7230
7231 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
7232 {
7233         if (IS_ROCKETLAKE(dev_priv))
7234                 return false;
7235         else if (INTEL_GEN(dev_priv) >= 12)
7236                 return phy >= PHY_D && phy <= PHY_I;
7237         else if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
7238                 return phy >= PHY_C && phy <= PHY_F;
7239         else
7240                 return false;
7241 }
7242
7243 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
7244 {
7245         if (IS_ROCKETLAKE(i915) && port >= PORT_D)
7246                 return (enum phy)port - 1;
7247         else if (IS_ELKHARTLAKE(i915) && port == PORT_D)
7248                 return PHY_A;
7249
7250         return (enum phy)port;
7251 }
7252
7253 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
7254 {
7255         if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
7256                 return PORT_TC_NONE;
7257
7258         if (INTEL_GEN(dev_priv) >= 12)
7259                 return port - PORT_D;
7260
7261         return port - PORT_C;
7262 }
7263
/*
 * Map a DDI port to the power domain covering its lanes. Unhandled
 * ports are flagged via MISSING_CASE() and fall back to
 * POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
7286
/*
 * Return the power domain for @dig_port's AUX channel. Type-C ports in
 * TBT-alt mode use the dedicated *_TBT AUX domains; every other port
 * maps via intel_legacy_aux_to_power_domain().
 */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	if (intel_phy_is_tc(dev_priv, phy) &&
	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}

	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}
7314
7315 /*
7316  * Converts aux_ch to power_domain without caring about TBT ports for that use
7317  * intel_aux_power_domain()
7318  */
7319 enum intel_display_power_domain
7320 intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
7321 {
7322         switch (aux_ch) {
7323         case AUX_CH_A:
7324                 return POWER_DOMAIN_AUX_A;
7325         case AUX_CH_B:
7326                 return POWER_DOMAIN_AUX_B;
7327         case AUX_CH_C:
7328                 return POWER_DOMAIN_AUX_C;
7329         case AUX_CH_D:
7330                 return POWER_DOMAIN_AUX_D;
7331         case AUX_CH_E:
7332                 return POWER_DOMAIN_AUX_E;
7333         case AUX_CH_F:
7334                 return POWER_DOMAIN_AUX_F;
7335         case AUX_CH_G:
7336                 return POWER_DOMAIN_AUX_G;
7337         default:
7338                 MISSING_CASE(aux_ch);
7339                 return POWER_DOMAIN_AUX_A;
7340         }
7341 }
7342
7343 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
7344 {
7345         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7346         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7347         struct drm_encoder *encoder;
7348         enum pipe pipe = crtc->pipe;
7349         u64 mask;
7350         enum transcoder transcoder = crtc_state->cpu_transcoder;
7351
7352         if (!crtc_state->hw.active)
7353                 return 0;
7354
7355         mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
7356         mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
7357         if (crtc_state->pch_pfit.enabled ||
7358             crtc_state->pch_pfit.force_thru)
7359                 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
7360
7361         drm_for_each_encoder_mask(encoder, &dev_priv->drm,
7362                                   crtc_state->uapi.encoder_mask) {
7363                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
7364
7365                 mask |= BIT_ULL(intel_encoder->power_domain);
7366         }
7367
7368         if (HAS_DDI(dev_priv) && crtc_state->has_audio)
7369                 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
7370
7371         if (crtc_state->shared_dpll)
7372                 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
7373
7374         return mask;
7375 }
7376
7377 static u64
7378 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
7379 {
7380         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7381         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7382         enum intel_display_power_domain domain;
7383         u64 domains, new_domains, old_domains;
7384
7385         old_domains = crtc->enabled_power_domains;
7386         crtc->enabled_power_domains = new_domains =
7387                 get_crtc_power_domains(crtc_state);
7388
7389         domains = new_domains & ~old_domains;
7390
7391         for_each_power_domain(domain, domains)
7392                 intel_display_power_get(dev_priv, domain);
7393
7394         return old_domains & ~new_domains;
7395 }
7396
/*
 * Drop an (unchecked) power reference for each domain set in @domains.
 * Counterpart to the references taken in modeset_get_crtc_power_domains().
 */
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
				      u64 domains)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
}
7405
7406 static void valleyview_crtc_enable(struct intel_atomic_state *state,
7407                                    struct intel_crtc *crtc)
7408 {
7409         const struct intel_crtc_state *new_crtc_state =
7410                 intel_atomic_get_new_crtc_state(state, crtc);
7411         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7412         enum pipe pipe = crtc->pipe;
7413
7414         if (drm_WARN_ON(&dev_priv->drm, crtc->active))
7415                 return;
7416
7417         if (intel_crtc_has_dp_encoder(new_crtc_state))
7418                 intel_dp_set_m_n(new_crtc_state, M1_N1);
7419
7420         intel_set_pipe_timings(new_crtc_state);
7421         intel_set_pipe_src_size(new_crtc_state);
7422
7423         if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
7424                 intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
7425                 intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
7426         }
7427
7428         i9xx_set_pipeconf(new_crtc_state);
7429
7430         crtc->active = true;
7431
7432         intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
7433
7434         intel_encoders_pre_pll_enable(state, crtc);
7435
7436         if (IS_CHERRYVIEW(dev_priv)) {
7437                 chv_prepare_pll(crtc, new_crtc_state);
7438                 chv_enable_pll(crtc, new_crtc_state);
7439         } else {
7440                 vlv_prepare_pll(crtc, new_crtc_state);
7441                 vlv_enable_pll(crtc, new_crtc_state);
7442         }
7443
7444         intel_encoders_pre_enable(state, crtc);
7445
7446         i9xx_pfit_enable(new_crtc_state);
7447
7448         intel_color_load_luts(new_crtc_state);
7449         intel_color_commit(new_crtc_state);
7450         /* update DSPCNTR to configure gamma for pipe bottom color */
7451         intel_disable_primary_plane(new_crtc_state);
7452
7453         dev_priv->display.initial_watermarks(state, crtc);
7454         intel_enable_pipe(new_crtc_state);
7455
7456         intel_crtc_vblank_on(new_crtc_state);
7457
7458         intel_encoders_enable(state, crtc);
7459 }
7460
7461 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
7462 {
7463         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7464         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7465
7466         intel_de_write(dev_priv, FP0(crtc->pipe),
7467                        crtc_state->dpll_hw_state.fp0);
7468         intel_de_write(dev_priv, FP1(crtc->pipe),
7469                        crtc_state->dpll_hw_state.fp1);
7470 }
7471
/*
 * Enable a pipe on pre-ILK (gen2-4) hardware.
 *
 * Programs the DPLL dividers, pipe timings/size, M/N values and PIPECONF,
 * runs the encoder pre-enable hooks, enables the DPLL, panel fitter and
 * LUTs, programs watermarks and finally enables the pipe and encoders.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* The pipe must be off before we can enable it. */
	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_set_pll_dividers(new_crtc_state);

	if (intel_crtc_has_dp_encoder(new_crtc_state))
		intel_dp_set_m_n(new_crtc_state, M1_N1);

	intel_set_pipe_timings(new_crtc_state);
	intel_set_pipe_src_size(new_crtc_state);

	i9xx_set_pipeconf(new_crtc_state);

	crtc->active = true;

	/* Gen2 has no FIFO underrun reporting. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(crtc, new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (dev_priv->display.initial_watermarks)
		dev_priv->display.initial_watermarks(state, crtc);
	else
		intel_update_watermarks(crtc);
	intel_enable_pipe(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
7519
7520 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
7521 {
7522         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
7523         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7524
7525         if (!old_crtc_state->gmch_pfit.control)
7526                 return;
7527
7528         assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
7529
7530         drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
7531                     intel_de_read(dev_priv, PFIT_CONTROL));
7532         intel_de_write(dev_priv, PFIT_CONTROL, 0);
7533 }
7534
/*
 * Disable a pipe on pre-ILK (gen2-4) and VLV/CHV hardware.
 *
 * Runs the encoder disable hooks around pipe/panel-fitter teardown,
 * disables the platform-appropriate DPLL (unless a DSI output keeps it
 * running) and finally updates watermarks. On i830 the pipe is clocked
 * back up to 640x480@60 instead of being left fully off.
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN(dev_priv, 2))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* DSI outputs keep the DPLL running; everything else shuts it off. */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	/* Gen2 has no FIFO underrun reporting. */
	if (!IS_GEN(dev_priv, 2))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
7581
/*
 * Forcibly disable a CRTC from outside the normal atomic commit path
 * (used when sanitizing inconsistent BIOS/hardware state).  Disables all
 * visible planes, invokes the platform crtc_disable hook through a
 * throwaway atomic state, and then scrubs every piece of software
 * bookkeeping (crtc state, encoder links, power domains, cdclk/dbuf/bw
 * per-pipe tracking) so it agrees with the now-disabled hardware.
 */
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(dev_priv->bw_obj.state);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	struct drm_atomic_state *state;
	struct intel_crtc_state *temp_crtc_state;
	enum pipe pipe = crtc->pipe;
	u64 domains;
	int ret;

	if (!crtc_state->hw.active)
		return;

	/* Turn off any still-visible planes before touching the pipe itself. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->uapi.visible)
			intel_plane_disable_noatomic(crtc, plane);
	}

	/*
	 * The crtc_disable hook takes an atomic state; build a minimal
	 * throwaway one just for this call.
	 */
	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to disable [CRTC:%d:%s], out of memory",
			    crtc->base.base.id, crtc->base.name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
	ret = drm_atomic_add_affected_connectors(state, &crtc->base);

	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);

	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);

	drm_atomic_state_put(state);

	drm_dbg_kms(&dev_priv->drm,
		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		    crtc->base.base.id, crtc->base.name);

	crtc->active = false;
	crtc->base.enabled = false;

	/* Reset the uapi/hw crtc state to "everything off". */
	drm_WARN_ON(&dev_priv->drm,
		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
	crtc_state->uapi.active = false;
	crtc_state->uapi.connector_mask = 0;
	crtc_state->uapi.encoder_mask = 0;
	intel_crtc_free_hw_state(crtc_state);
	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));

	/* Detach any encoders still pointing at this crtc. */
	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(crtc);
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(crtc_state);

	/* Drop every power domain reference the crtc was holding. */
	domains = crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put_unchecked(dev_priv, domain);
	crtc->enabled_power_domains = 0;

	/* Scrub the per-pipe cdclk/dbuf/bandwidth bookkeeping. */
	dev_priv->active_pipes &= ~BIT(pipe);
	cdclk_state->min_cdclk[pipe] = 0;
	cdclk_state->min_voltage_level[pipe] = 0;
	cdclk_state->active_pipes &= ~BIT(pipe);

	dbuf_state->active_pipes &= ~BIT(pipe);

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}
7671
7672 /*
7673  * turn all crtc's off, but do not adjust state
7674  * This has to be paired with a call to intel_modeset_setup_hw_state.
7675  */
7676 int intel_display_suspend(struct drm_device *dev)
7677 {
7678         struct drm_i915_private *dev_priv = to_i915(dev);
7679         struct drm_atomic_state *state;
7680         int ret;
7681
7682         state = drm_atomic_helper_suspend(dev);
7683         ret = PTR_ERR_OR_ZERO(state);
7684         if (ret)
7685                 drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
7686                         ret);
7687         else
7688                 dev_priv->modeset_restore_state = state;
7689         return ret;
7690 }
7691
7692 void intel_encoder_destroy(struct drm_encoder *encoder)
7693 {
7694         struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
7695
7696         drm_encoder_cleanup(encoder);
7697         kfree(intel_encoder);
7698 }
7699
7700 /* Cross check the actual hw state with our own modeset state tracking (and it's
7701  * internal consistency). */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.base.id, connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = intel_attached_encoder(connector);

		/* An enabled connector must have a crtc in our tracking. */
		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->hw.active,
				"connector is active, but attached crtc isn't\n");

		/* The encoder/crtc linkage checks below are skipped for DP MST. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* Connector is off in hardware: our tracking must agree. */
		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
7738
7739 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7740 {
7741         if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
7742                 return crtc_state->fdi_lanes;
7743
7744         return 0;
7745 }
7746
/*
 * Validate the FDI lane count for @pipe against the platform limits and
 * the lane-sharing constraints between pipes.  Returns 0 when the config
 * is acceptable, -EINVAL when it is impossible, or the error from
 * acquiring another pipe's crtc state (e.g. -EDEADLK).
 */
static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	/* Absolute maximum: 4 lanes. */
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW are capped at 2 lanes; no sharing checks needed. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* With only two pipes there is no lane sharing to worry about. */
	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		/* Pipe B using >2 lanes requires pipe C to use none. */
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		/* Pipe C is always limited to 2 lanes ... */
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* ... and additionally requires pipe B to use at most 2. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
7824
#define RETRY 1
/*
 * Compute the FDI link parameters (lane count and M/N values) for this
 * crtc config.  Returns 0 on success, RETRY when pipe_bpp had to be
 * lowered to fit the link and the caller should recompute the config,
 * or a negative error code (-EDEADLK is propagated unmodified).
 */
static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false, false);

	ret = ilk_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	if (ret == -EDEADLK)
		return ret;

	/*
	 * If the lane config is invalid, reduce bpp by 2 bits per component
	 * (3 components) and try again, down to a floor of 6 bpc (6*3).
	 */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		drm_dbg_kms(&i915->drm,
			    "fdi link bw constraint, reducing pipe bpp to %i\n",
			    pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
7875
7876 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
7877 {
7878         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7879         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7880
7881         /* IPS only exists on ULT machines and is tied to pipe A. */
7882         if (!hsw_crtc_supports_ips(crtc))
7883                 return false;
7884
7885         if (!dev_priv->params.enable_ips)
7886                 return false;
7887
7888         if (crtc_state->pipe_bpp > 24)
7889                 return false;
7890
7891         /*
7892          * We compare against max which means we must take
7893          * the increased cdclk requirement into account when
7894          * calculating the new cdclk.
7895          *
7896          * Should measure whether using a lower cdclk w/o IPS
7897          */
7898         if (IS_BROADWELL(dev_priv) &&
7899             crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
7900                 return false;
7901
7902         return true;
7903 }
7904
/*
 * Decide whether IPS should be enabled for this crtc state.  Leaves
 * crtc_state->ips_enabled false unless every constraint (capability,
 * CRC capture, active planes, BDW cdclk headroom) is satisfied.
 * Returns 0, or a negative error from fetching the cdclk state.
 */
static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->uapi.crtc->dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);

	crtc_state->ips_enabled = false;

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return 0;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return 0;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return 0;

	if (IS_BROADWELL(dev_priv)) {
		const struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
			return 0;
	}

	crtc_state->ips_enabled = true;

	return 0;
}
7946
7947 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7948 {
7949         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7950
7951         /* GDG double wide on either pipe, otherwise pipe A only */
7952         return INTEL_GEN(dev_priv) < 4 &&
7953                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
7954 }
7955
7956 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
7957 {
7958         u32 pixel_rate = crtc_state->hw.adjusted_mode.crtc_clock;
7959         unsigned int pipe_w, pipe_h, pfit_w, pfit_h;
7960
7961         /*
7962          * We only use IF-ID interlacing. If we ever use
7963          * PF-ID we'll need to adjust the pixel_rate here.
7964          */
7965
7966         if (!crtc_state->pch_pfit.enabled)
7967                 return pixel_rate;
7968
7969         pipe_w = crtc_state->pipe_src_w;
7970         pipe_h = crtc_state->pipe_src_h;
7971
7972         pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
7973         pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);
7974
7975         if (pipe_w < pfit_w)
7976                 pipe_w = pfit_w;
7977         if (pipe_h < pfit_h)
7978                 pipe_h = pfit_h;
7979
7980         if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
7981                         !pfit_w || !pfit_h))
7982                 return pixel_rate;
7983
7984         return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
7985                        pfit_w * pfit_h);
7986 }
7987
7988 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
7989 {
7990         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
7991
7992         if (HAS_GMCH(dev_priv))
7993                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
7994                 crtc_state->pixel_rate =
7995                         crtc_state->hw.adjusted_mode.crtc_clock;
7996         else
7997                 crtc_state->pixel_rate =
7998                         ilk_pipe_pixel_rate(crtc_state);
7999 }
8000
/*
 * Validate and finalize the platform-independent parts of the crtc
 * configuration: dotclock limit (and double wide mode on pre-gen4),
 * YCbCr+CTM exclusion, even-pipe-width requirements, and the hsync
 * front porch workaround.  Also computes the pipe pixel rate and, for
 * PCH encoders, the FDI link configuration.  Returns 0 or -EINVAL
 * (or the result of ilk_fdi_compute_config()).
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		/* Pre-gen4: dotclock limited to 90% of cdclk ... */
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&dev_priv->drm,
			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    adjusted_mode->crtc_clock, clock_limit,
			    yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
	     pipe_config->hw.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, pipe_config);

	return 0;
}
8078
8079 static void
8080 intel_reduce_m_n_ratio(u32 *num, u32 *den)
8081 {
8082         while (*num > DATA_LINK_M_N_MASK ||
8083                *den > DATA_LINK_M_N_MASK) {
8084                 *num >>= 1;
8085                 *den >>= 1;
8086         }
8087 }
8088
8089 static void compute_m_n(unsigned int m, unsigned int n,
8090                         u32 *ret_m, u32 *ret_n,
8091                         bool constant_n)
8092 {
8093         /*
8094          * Several DP dongles in particular seem to be fussy about
8095          * too large link M/N values. Give N value as 0x8000 that
8096          * should be acceptable by specific devices. 0x8000 is the
8097          * specified fixed N value for asynchronous clock mode,
8098          * which the devices expect also in synchronous clock mode.
8099          */
8100         if (constant_n)
8101                 *ret_n = 0x8000;
8102         else
8103                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
8104
8105         *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
8106         intel_reduce_m_n_ratio(ret_m, ret_n);
8107 }
8108
8109 void
8110 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
8111                        int pixel_clock, int link_clock,
8112                        struct intel_link_m_n *m_n,
8113                        bool constant_n, bool fec_enable)
8114 {
8115         u32 data_clock = bits_per_pixel * pixel_clock;
8116
8117         if (fec_enable)
8118                 data_clock = intel_dp_mode_to_fec_clock(data_clock);
8119
8120         m_n->tu = 64;
8121         compute_m_n(data_clock,
8122                     link_clock * nlanes * 8,
8123                     &m_n->gmch_m, &m_n->gmch_n,
8124                     constant_n);
8125
8126         compute_m_n(pixel_clock, link_clock,
8127                     &m_n->link_m, &m_n->link_n,
8128                     constant_n);
8129 }
8130
/*
 * Reconcile the VBT's LVDS SSC setting with what the BIOS actually
 * programmed in PCH_DREF_CONTROL, preferring the BIOS value.
 */
static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
						       PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			drm_dbg_kms(&dev_priv->drm,
				    "SSC %s by BIOS, overriding VBT which says %s\n",
				    enableddisabled(bios_lvds_use_ssc),
				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}
8153
8154 static bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
8155 {
8156         if (dev_priv->params.panel_use_ssc >= 0)
8157                 return dev_priv->params.panel_use_ssc != 0;
8158         return dev_priv->vbt.lvds_use_ssc
8159                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
8160 }
8161
8162 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
8163 {
8164         return (1 << dpll->n) << 16 | dpll->m2;
8165 }
8166
8167 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
8168 {
8169         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
8170 }
8171
8172 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
8173                                      struct intel_crtc_state *crtc_state,
8174                                      struct dpll *reduced_clock)
8175 {
8176         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8177         u32 fp, fp2 = 0;
8178
8179         if (IS_PINEVIEW(dev_priv)) {
8180                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
8181                 if (reduced_clock)
8182                         fp2 = pnv_dpll_compute_fp(reduced_clock);
8183         } else {
8184                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
8185                 if (reduced_clock)
8186                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
8187         }
8188
8189         crtc_state->dpll_hw_state.fp0 = fp;
8190
8191         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8192             reduced_clock) {
8193                 crtc_state->dpll_hw_state.fp1 = fp2;
8194         } else {
8195                 crtc_state->dpll_hw_state.fp1 = fp;
8196         }
8197 }
8198
/*
 * PLLB opamp recalibration workaround for VLV, see comment below.
 * The DPIO register values written here are opaque tuning constants;
 * the exact write sequence is significant — do not reorder.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
8227
/* Program the PCH transcoder data/link M1/N1 registers for this crtc's pipe. */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* TU size shares the DATA_M register with the data M value. */
	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
		       TU_SIZE(m_n->tu) | m_n->gmch_m);
	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
8241
8242 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
8243                                  enum transcoder transcoder)
8244 {
8245         if (IS_HASWELL(dev_priv))
8246                 return transcoder == TRANSCODER_EDP;
8247
8248         /*
8249          * Strictly speaking some registers are available before
8250          * gen7, but we only support DRRS on gen7+
8251          */
8252         return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
8253 }
8254
/*
 * Program the CPU transcoder data/link M/N registers.  On gen5+ the
 * registers are indexed by transcoder (and M2/N2 are also written when
 * DRRS is in use); on older gens by pipe, with no M2/N2 support.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
			       m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
			       m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
			       m_n->link_n);
		/*
		 *  M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
				       m2_n2->gmch_n);
			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
				       m2_n2->link_m);
			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
				       m2_n2->link_n);
		}
	} else {
		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
			       TU_SIZE(m_n->tu) | m_n->gmch_m);
		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
8296
8297 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
8298 {
8299         const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
8300         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
8301
8302         if (m_n == M1_N1) {
8303                 dp_m_n = &crtc_state->dp_m_n;
8304                 dp_m2_n2 = &crtc_state->dp_m2_n2;
8305         } else if (m_n == M2_N2) {
8306
8307                 /*
8308                  * M2_N2 registers are not supported. Hence m2_n2 divider value
8309                  * needs to be programmed into M1_N1.
8310                  */
8311                 dp_m_n = &crtc_state->dp_m2_n2;
8312         } else {
8313                 drm_err(&i915->drm, "Unsupported divider value\n");
8314                 return;
8315         }
8316
8317         if (crtc_state->has_pch_encoder)
8318                 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
8319         else
8320                 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
8321 }
8322
/*
 * Compute the DPLL and DPLL_MD register values for VLV.  The DPLL
 * proper is left disabled for DSI outputs, but the ref-clock bits are
 * still needed.
 */
static void vlv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* CRI clock is only needed on pipes other than A. */
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
			DPLL_EXT_BUFFER_ENABLE_VLV;

	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
8339
/*
 * Compute the DPLL and DPLL_MD register values for CHV.  Same structure
 * as vlv_compute_dpll() but with the CHV SSC ref clock and without the
 * external buffer enable bit.
 */
static void chv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* CRI clock is only needed on pipes other than A. */
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;

	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
8355
/*
 * Program the VLV DPIO PHY registers for this pipe's DPLL using the
 * pre-computed divider values in pipe_config->dpll. The DPLL control
 * register is written refclk-only first; if the VCO is not going to be
 * enabled (DSI case) the DPIO programming is skipped entirely.
 *
 * The magic DPIO constants below come from the notes doc referenced
 * inline; they are not derived at runtime.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        u32 mdiv;
        u32 bestn, bestm1, bestm2, bestp1, bestp2;
        u32 coreclk, reg_val;

        /* Enable Refclk */
        intel_de_write(dev_priv, DPLL(pipe),
                       pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

        /* No need to actually set up the DPLL with DSI */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        /* All DPIO accesses below are bracketed by vlv_dpio_get()/put() */
        vlv_dpio_get(dev_priv);

        /* Pre-computed divider values from the compute stage */
        bestn = pipe_config->dpll.n;
        bestm1 = pipe_config->dpll.m1;
        bestm2 = pipe_config->dpll.m2;
        bestp1 = pipe_config->dpll.p1;
        bestp2 = pipe_config->dpll.p2;

        /* See eDP HDMI DPIO driver vbios notes doc */

        /* PLL B needs special handling */
        if (pipe == PIPE_B)
                vlv_pllb_recal_opamp(dev_priv, pipe);

        /* Set up Tx target for periodic Rcomp update */
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

        /* Disable target IRef on PLL */
        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
        reg_val &= 0x00ffffff;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

        /* Disable fast lock */
        vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

        /* Set idtafcrecal before PLL is enabled */
        mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
        mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
        mdiv |= ((bestn << DPIO_N_SHIFT));
        mdiv |= (1 << DPIO_K_SHIFT);

        /*
         * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
         * but we don't support that).
         * Note: don't use the DAC post divider as it seems unstable.
         */
        mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

        /* Dividers written first, then calibration enabled on top of them */
        mdiv |= DPIO_ENABLE_CALIBRATION;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

        /* Set HBR and RBR LPF coefficients */
        if (pipe_config->port_clock == 162000 ||
            intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
            intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
                vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x009f0003);
        else
                vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x00d0000f);

        if (intel_crtc_has_dp_encoder(pipe_config)) {
                /* Use SSC source */
                if (pipe == PIPE_A)
                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df40000);
                else
                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df70000);
        } else { /* HDMI or VGA */
                /* Use bend source */
                if (pipe == PIPE_A)
                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df70000);
                else
                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
                                         0x0df40000);
        }

        /* Core clock: read-modify-write, extra bit set for DP outputs */
        coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
        coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
        if (intel_crtc_has_dp_encoder(pipe_config))
                coreclk |= 0x01000000;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);

        vlv_dpio_put(dev_priv);
}
8454
/*
 * Program the CHV DPIO PHY registers for this pipe's DPLL using the
 * pre-computed divider values in pipe_config->dpll. As on VLV, the DPLL
 * control register is written refclk-only first, and the DPIO sequence
 * is skipped when the VCO will not be enabled (DSI case).
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 loopfilter, tribuf_calcntr;
        u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
        u32 dpio_val;
        int vco;

        /* Enable Refclk and SSC */
        intel_de_write(dev_priv, DPLL(pipe),
                       pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

        /* No need to actually set up the DPLL with DSI */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        /* m2 is split: low 22 bits are the fraction, the rest the integer */
        bestn = pipe_config->dpll.n;
        bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
        bestm1 = pipe_config->dpll.m1;
        bestm2 = pipe_config->dpll.m2 >> 22;
        bestp1 = pipe_config->dpll.p1;
        bestp2 = pipe_config->dpll.p2;
        vco = pipe_config->dpll.vco;
        dpio_val = 0;
        loopfilter = 0;

        vlv_dpio_get(dev_priv);

        /* p1 and p2 divider */
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
                        5 << DPIO_CHV_S1_DIV_SHIFT |
                        bestp1 << DPIO_CHV_P1_DIV_SHIFT |
                        bestp2 << DPIO_CHV_P2_DIV_SHIFT |
                        1 << DPIO_CHV_K_DIV_SHIFT);

        /* Feedback post-divider - m2 */
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

        /* Feedback refclk divider - n and m1 */
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
                        DPIO_CHV_M1_DIV_BY_2 |
                        1 << DPIO_CHV_N_DIV_SHIFT);

        /* M2 fraction division */
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

        /* M2 fraction division enable */
        dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
        dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
        dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
        if (bestm2_frac)
                dpio_val |= DPIO_CHV_FRAC_DIV_EN;
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

        /* Program digital lock detect threshold */
        /* coarse threshold selection only when no fractional part is used */
        dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
        dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
                                        DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
        dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
        if (!bestm2_frac)
                dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

        /* Loop filter */
        /* coefficients and tribuf count chosen per VCO frequency band */
        if (vco == 5400000) {
                loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
                loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
                loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
                tribuf_calcntr = 0x9;
        } else if (vco <= 6200000) {
                loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
                loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
                loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
                tribuf_calcntr = 0x9;
        } else if (vco <= 6480000) {
                loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
                loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
                loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
                tribuf_calcntr = 0x8;
        } else {
                /* Not supported. Apply the same limits as in the max case */
                loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
                loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
                loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
                tribuf_calcntr = 0;
        }
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

        dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
        dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
        dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
        vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

        /* AFC Recal */
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
                        vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
                        DPIO_AFC_RECAL);

        vlv_dpio_put(dev_priv);
}
8559
8560 /**
8561  * vlv_force_pll_on - forcibly enable just the PLL
8562  * @dev_priv: i915 private structure
8563  * @pipe: pipe PLL to enable
8564  * @dpll: PLL configuration
8565  *
8566  * Enable the PLL for @pipe using the supplied @dpll config. To be used
8567  * in cases where we need the PLL enabled even when @pipe is not going to
8568  * be enabled.
8569  */
8570 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
8571                      const struct dpll *dpll)
8572 {
8573         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
8574         struct intel_crtc_state *pipe_config;
8575
8576         pipe_config = intel_crtc_state_alloc(crtc);
8577         if (!pipe_config)
8578                 return -ENOMEM;
8579
8580         pipe_config->cpu_transcoder = (enum transcoder)pipe;
8581         pipe_config->pixel_multiplier = 1;
8582         pipe_config->dpll = *dpll;
8583
8584         if (IS_CHERRYVIEW(dev_priv)) {
8585                 chv_compute_dpll(crtc, pipe_config);
8586                 chv_prepare_pll(crtc, pipe_config);
8587                 chv_enable_pll(crtc, pipe_config);
8588         } else {
8589                 vlv_compute_dpll(crtc, pipe_config);
8590                 vlv_prepare_pll(crtc, pipe_config);
8591                 vlv_enable_pll(crtc, pipe_config);
8592         }
8593
8594         kfree(pipe_config);
8595
8596         return 0;
8597 }
8598
8599 /**
8600  * vlv_force_pll_off - forcibly disable just the PLL
8601  * @dev_priv: i915 private structure
8602  * @pipe: pipe PLL to disable
8603  *
8604  * Disable the PLL for @pipe. To be used in cases where we need
8605  * the PLL enabled even when @pipe is not going to be enabled.
8606  */
8607 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
8608 {
8609         if (IS_CHERRYVIEW(dev_priv))
8610                 chv_disable_pll(dev_priv, pipe);
8611         else
8612                 vlv_disable_pll(dev_priv, pipe);
8613 }
8614
/*
 * Compute the DPLL (and, on gen4+, DPLL_MD) register values for i9xx-class
 * hardware and store them in crtc_state->dpll_hw_state. Also programs the
 * FP dividers via i9xx_update_pll_dividers(). @reduced_clock, when
 * non-NULL, supplies the downclocked P1 divider (G4X only here).
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
                              struct intel_crtc_state *crtc_state,
                              struct dpll *reduced_clock)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        u32 dpll;
        struct dpll *clock = &crtc_state->dpll;

        i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

        dpll = DPLL_VGA_MODE_DIS;

        if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
                dpll |= DPLLB_MODE_LVDS;
        else
                dpll |= DPLLB_MODE_DAC_SERIAL;

        if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
            IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
                dpll |= (crtc_state->pixel_multiplier - 1)
                        << SDVO_MULTIPLIER_SHIFT_HIRES;
        }

        if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
            intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
                dpll |= DPLL_SDVO_HIGH_SPEED;

        /* DP also needs the high speed clock enable bit */
        if (intel_crtc_has_dp_encoder(crtc_state))
                dpll |= DPLL_SDVO_HIGH_SPEED;

        /* compute bitmask from p1 value */
        if (IS_PINEVIEW(dev_priv))
                dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
        else {
                dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
                if (IS_G4X(dev_priv) && reduced_clock)
                        dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
        }
        switch (clock->p2) {
        case 5:
                dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
                break;
        case 7:
                dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
                break;
        case 10:
                dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
                break;
        case 14:
                dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
                break;
        }
        if (INTEL_GEN(dev_priv) >= 4)
                dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

        /* Reference clock selection: TV clock, SSC for LVDS, else DREF */
        if (crtc_state->sdvo_tv_clock)
                dpll |= PLL_REF_INPUT_TVCLKINBC;
        else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
                 intel_panel_use_ssc(dev_priv))
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
        else
                dpll |= PLL_REF_INPUT_DREFCLK;

        dpll |= DPLL_VCO_ENABLE;
        crtc_state->dpll_hw_state.dpll = dpll;

        /* gen4+ keeps the pixel multiplier in the separate DPLL_MD register */
        if (INTEL_GEN(dev_priv) >= 4) {
                u32 dpll_md = (crtc_state->pixel_multiplier - 1)
                        << DPLL_MD_UDI_MULTIPLIER_SHIFT;
                crtc_state->dpll_hw_state.dpll_md = dpll_md;
        }
}
8687
/*
 * Compute the DPLL register value for gen2 (i8xx) hardware and store it
 * in crtc_state->dpll_hw_state.dpll. Also programs the FP dividers via
 * i9xx_update_pll_dividers(). @reduced_clock is passed through to the
 * divider update only.
 */
static void i8xx_compute_dpll(struct intel_crtc *crtc,
                              struct intel_crtc_state *crtc_state,
                              struct dpll *reduced_clock)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 dpll;
        struct dpll *clock = &crtc_state->dpll;

        i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

        dpll = DPLL_VGA_MODE_DIS;

        /* P1/P2 encoding differs between LVDS and the other outputs */
        if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
        } else {
                if (clock->p1 == 2)
                        dpll |= PLL_P1_DIVIDE_BY_TWO;
                else
                        dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
                if (clock->p2 == 4)
                        dpll |= PLL_P2_DIVIDE_BY_4;
        }

        /*
         * Bspec:
         * "[Almador Errata}: For the correct operation of the muxed DVO pins
         *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
         *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
         *  Enable) must be set to “1” in both the DPLL A Control Register
         *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
         *
         * For simplicity We simply keep both bits always enabled in
         * both DPLLS. The spec says we should disable the DVO 2X clock
         * when not needed, but this seems to work fine in practice.
         */
        if (IS_I830(dev_priv) ||
            intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
                dpll |= DPLL_DVO_2X_MODE;

        /* Reference clock: SSC for LVDS panels that use it, else DREF */
        if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
            intel_panel_use_ssc(dev_priv))
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
        else
                dpll |= PLL_REF_INPUT_DREFCLK;

        dpll |= DPLL_VCO_ENABLE;
        crtc_state->dpll_hw_state.dpll = dpll;
}
8737
/*
 * Write the transcoder timing registers (H/VTOTAL, H/VBLANK, H/VSYNC,
 * VSYNCSHIFT) from crtc_state->hw.adjusted_mode. All register fields are
 * programmed as (value - 1), with the "start" half in the low word and
 * the "end"/"total" half in the high word.
 */
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
        u32 crtc_vtotal, crtc_vblank_end;
        int vsyncshift = 0;

        /* We need to be careful not to changed the adjusted mode, for otherwise
         * the hw state checker will get angry at the mismatch. */
        crtc_vtotal = adjusted_mode->crtc_vtotal;
        crtc_vblank_end = adjusted_mode->crtc_vblank_end;

        if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                /* the chip adds 2 halflines automatically */
                crtc_vtotal -= 1;
                crtc_vblank_end -= 1;

                /* SDVO uses a fixed half-line shift; others derive it from
                 * the hsync position, wrapping negative values by htotal */
                if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
                        vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
                else
                        vsyncshift = adjusted_mode->crtc_hsync_start -
                                adjusted_mode->crtc_htotal / 2;
                if (vsyncshift < 0)
                        vsyncshift += adjusted_mode->crtc_htotal;
        }

        /* VSYNCSHIFT only exists on gen4+ */
        if (INTEL_GEN(dev_priv) > 3)
                intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
                               vsyncshift);

        intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
                       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
        intel_de_write(dev_priv, HBLANK(cpu_transcoder),
                       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
        intel_de_write(dev_priv, HSYNC(cpu_transcoder),
                       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));

        intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
                       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
        intel_de_write(dev_priv, VBLANK(cpu_transcoder),
                       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
        intel_de_write(dev_priv, VSYNC(cpu_transcoder),
                       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));

        /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
         * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
         * documented on the DDI_FUNC_CTL register description, EDP Input Select
         * bits. */
        if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
            (pipe == PIPE_B || pipe == PIPE_C))
                intel_de_write(dev_priv, VTOTAL(pipe),
                               intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));

}
8795
8796 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
8797 {
8798         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8799         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8800         enum pipe pipe = crtc->pipe;
8801
8802         /* pipesrc controls the size that is scaled from, which should
8803          * always be the user's requested size.
8804          */
8805         intel_de_write(dev_priv, PIPESRC(pipe),
8806                        ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
8807 }
8808
8809 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
8810 {
8811         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
8812         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
8813
8814         if (IS_GEN(dev_priv, 2))
8815                 return false;
8816
8817         if (INTEL_GEN(dev_priv) >= 9 ||
8818             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
8819                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
8820         else
8821                 return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
8822 }
8823
/*
 * Read the transcoder timing registers back into the adjusted mode of
 * @pipe_config. Each register field holds (value - 1): the "start" half
 * in the low word, the "end"/"total" half in the high word, so every
 * decode adds 1 back.
 */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        u32 tmp;

        tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
        pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
        pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;

        /* HBLANK is skipped for DSI transcoders; presumably they have no
         * such register — TODO confirm against the DSI transcoder code */
        if (!transcoder_is_dsi(cpu_transcoder)) {
                tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
                pipe_config->hw.adjusted_mode.crtc_hblank_start =
                                                        (tmp & 0xffff) + 1;
                pipe_config->hw.adjusted_mode.crtc_hblank_end =
                                                ((tmp >> 16) & 0xffff) + 1;
        }
        tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
        pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
        pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

        tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
        pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
        pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;

        /* VBLANK likewise skipped for DSI transcoders */
        if (!transcoder_is_dsi(cpu_transcoder)) {
                tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
                pipe_config->hw.adjusted_mode.crtc_vblank_start =
                                                        (tmp & 0xffff) + 1;
                pipe_config->hw.adjusted_mode.crtc_vblank_end =
                                                ((tmp >> 16) & 0xffff) + 1;
        }
        tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
        pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
        pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

        /* Undo the halfline adjustment the hardware applies for interlace
         * (the set path subtracted 1 from vtotal/vblank_end) */
        if (intel_pipe_is_interlaced(pipe_config)) {
                pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
                pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
                pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
        }
}
8868
8869 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8870                                     struct intel_crtc_state *pipe_config)
8871 {
8872         struct drm_device *dev = crtc->base.dev;
8873         struct drm_i915_private *dev_priv = to_i915(dev);
8874         u32 tmp;
8875
8876         tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
8877         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8878         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8879
8880         pipe_config->hw.mode.vdisplay = pipe_config->pipe_src_h;
8881         pipe_config->hw.mode.hdisplay = pipe_config->pipe_src_w;
8882 }
8883
8884 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
8885                                  struct intel_crtc_state *pipe_config)
8886 {
8887         mode->hdisplay = pipe_config->hw.adjusted_mode.crtc_hdisplay;
8888         mode->htotal = pipe_config->hw.adjusted_mode.crtc_htotal;
8889         mode->hsync_start = pipe_config->hw.adjusted_mode.crtc_hsync_start;
8890         mode->hsync_end = pipe_config->hw.adjusted_mode.crtc_hsync_end;
8891
8892         mode->vdisplay = pipe_config->hw.adjusted_mode.crtc_vdisplay;
8893         mode->vtotal = pipe_config->hw.adjusted_mode.crtc_vtotal;
8894         mode->vsync_start = pipe_config->hw.adjusted_mode.crtc_vsync_start;
8895         mode->vsync_end = pipe_config->hw.adjusted_mode.crtc_vsync_end;
8896
8897         mode->flags = pipe_config->hw.adjusted_mode.flags;
8898         mode->type = DRM_MODE_TYPE_DRIVER;
8899
8900         mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
8901
8902         drm_mode_set_name(mode);
8903 }
8904
/*
 * Compute and write the PIPECONF register for i9xx-class pipes from the
 * crtc state: double-wide mode, bpc/dithering (G4X/VLV/CHV only),
 * interlace mode, color range, gamma mode and frame start delay.
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        u32 pipeconf;

        pipeconf = 0;

        /* we keep both pipes enabled on 830 */
        if (IS_I830(dev_priv))
                pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

        if (crtc_state->double_wide)
                pipeconf |= PIPECONF_DOUBLE_WIDE;

        /* only g4x and later have fancy bpc/dither controls */
        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
            IS_CHERRYVIEW(dev_priv)) {
                /* Bspec claims that we can't use dithering for 30bpp pipes. */
                if (crtc_state->dither && crtc_state->pipe_bpp != 30)
                        pipeconf |= PIPECONF_DITHER_EN |
                                    PIPECONF_DITHER_TYPE_SP;

                switch (crtc_state->pipe_bpp) {
                case 18:
                        pipeconf |= PIPECONF_6BPC;
                        break;
                case 24:
                        pipeconf |= PIPECONF_8BPC;
                        break;
                case 30:
                        pipeconf |= PIPECONF_10BPC;
                        break;
                default:
                        /* Case prevented by intel_choose_pipe_bpp_dither. */
                        BUG();
                }
        }

        if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
                /* gen3- and SDVO outputs need the field indication variant */
                if (INTEL_GEN(dev_priv) < 4 ||
                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
                        pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
                else
                        pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
        } else {
                pipeconf |= PIPECONF_PROGRESSIVE;
        }

        /* limited (16-235) color range selection exists only on VLV/CHV here */
        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
             crtc_state->limited_color_range)
                pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

        pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

        pipeconf |= PIPECONF_FRAME_START_DELAY(0);

        intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
        intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
8965
8966 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8967                                    struct intel_crtc_state *crtc_state)
8968 {
8969         struct drm_device *dev = crtc->base.dev;
8970         struct drm_i915_private *dev_priv = to_i915(dev);
8971         const struct intel_limit *limit;
8972         int refclk = 48000;
8973
8974         memset(&crtc_state->dpll_hw_state, 0,
8975                sizeof(crtc_state->dpll_hw_state));
8976
8977         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8978                 if (intel_panel_use_ssc(dev_priv)) {
8979                         refclk = dev_priv->vbt.lvds_ssc_freq;
8980                         drm_dbg_kms(&dev_priv->drm,
8981                                     "using SSC reference clock of %d kHz\n",
8982                                     refclk);
8983                 }
8984
8985                 limit = &intel_limits_i8xx_lvds;
8986         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
8987                 limit = &intel_limits_i8xx_dvo;
8988         } else {
8989                 limit = &intel_limits_i8xx_dac;
8990         }
8991
8992         if (!crtc_state->clock_set &&
8993             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8994                                  refclk, NULL, &crtc_state->dpll)) {
8995                 drm_err(&dev_priv->drm,
8996                         "Couldn't find PLL settings for mode!\n");
8997                 return -EINVAL;
8998         }
8999
9000         i8xx_compute_dpll(crtc, crtc_state, NULL);
9001
9002         return 0;
9003 }
9004
9005 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
9006                                   struct intel_crtc_state *crtc_state)
9007 {
9008         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9009         const struct intel_limit *limit;
9010         int refclk = 96000;
9011
9012         memset(&crtc_state->dpll_hw_state, 0,
9013                sizeof(crtc_state->dpll_hw_state));
9014
9015         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9016                 if (intel_panel_use_ssc(dev_priv)) {
9017                         refclk = dev_priv->vbt.lvds_ssc_freq;
9018                         drm_dbg_kms(&dev_priv->drm,
9019                                     "using SSC reference clock of %d kHz\n",
9020                                     refclk);
9021                 }
9022
9023                 if (intel_is_dual_link_lvds(dev_priv))
9024                         limit = &intel_limits_g4x_dual_channel_lvds;
9025                 else
9026                         limit = &intel_limits_g4x_single_channel_lvds;
9027         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
9028                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
9029                 limit = &intel_limits_g4x_hdmi;
9030         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
9031                 limit = &intel_limits_g4x_sdvo;
9032         } else {
9033                 /* The option is for other outputs */
9034                 limit = &intel_limits_i9xx_sdvo;
9035         }
9036
9037         if (!crtc_state->clock_set &&
9038             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9039                                 refclk, NULL, &crtc_state->dpll)) {
9040                 drm_err(&dev_priv->drm,
9041                         "Couldn't find PLL settings for mode!\n");
9042                 return -EINVAL;
9043         }
9044
9045         i9xx_compute_dpll(crtc, crtc_state, NULL);
9046
9047         return 0;
9048 }
9049
9050 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
9051                                   struct intel_crtc_state *crtc_state)
9052 {
9053         struct drm_device *dev = crtc->base.dev;
9054         struct drm_i915_private *dev_priv = to_i915(dev);
9055         const struct intel_limit *limit;
9056         int refclk = 96000;
9057
9058         memset(&crtc_state->dpll_hw_state, 0,
9059                sizeof(crtc_state->dpll_hw_state));
9060
9061         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9062                 if (intel_panel_use_ssc(dev_priv)) {
9063                         refclk = dev_priv->vbt.lvds_ssc_freq;
9064                         drm_dbg_kms(&dev_priv->drm,
9065                                     "using SSC reference clock of %d kHz\n",
9066                                     refclk);
9067                 }
9068
9069                 limit = &pnv_limits_lvds;
9070         } else {
9071                 limit = &pnv_limits_sdvo;
9072         }
9073
9074         if (!crtc_state->clock_set &&
9075             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9076                                 refclk, NULL, &crtc_state->dpll)) {
9077                 drm_err(&dev_priv->drm,
9078                         "Couldn't find PLL settings for mode!\n");
9079                 return -EINVAL;
9080         }
9081
9082         i9xx_compute_dpll(crtc, crtc_state, NULL);
9083
9084         return 0;
9085 }
9086
9087 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
9088                                    struct intel_crtc_state *crtc_state)
9089 {
9090         struct drm_device *dev = crtc->base.dev;
9091         struct drm_i915_private *dev_priv = to_i915(dev);
9092         const struct intel_limit *limit;
9093         int refclk = 96000;
9094
9095         memset(&crtc_state->dpll_hw_state, 0,
9096                sizeof(crtc_state->dpll_hw_state));
9097
9098         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9099                 if (intel_panel_use_ssc(dev_priv)) {
9100                         refclk = dev_priv->vbt.lvds_ssc_freq;
9101                         drm_dbg_kms(&dev_priv->drm,
9102                                     "using SSC reference clock of %d kHz\n",
9103                                     refclk);
9104                 }
9105
9106                 limit = &intel_limits_i9xx_lvds;
9107         } else {
9108                 limit = &intel_limits_i9xx_sdvo;
9109         }
9110
9111         if (!crtc_state->clock_set &&
9112             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9113                                  refclk, NULL, &crtc_state->dpll)) {
9114                 drm_err(&dev_priv->drm,
9115                         "Couldn't find PLL settings for mode!\n");
9116                 return -EINVAL;
9117         }
9118
9119         i9xx_compute_dpll(crtc, crtc_state, NULL);
9120
9121         return 0;
9122 }
9123
9124 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
9125                                   struct intel_crtc_state *crtc_state)
9126 {
9127         int refclk = 100000;
9128         const struct intel_limit *limit = &intel_limits_chv;
9129         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
9130
9131         memset(&crtc_state->dpll_hw_state, 0,
9132                sizeof(crtc_state->dpll_hw_state));
9133
9134         if (!crtc_state->clock_set &&
9135             !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9136                                 refclk, NULL, &crtc_state->dpll)) {
9137                 drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
9138                 return -EINVAL;
9139         }
9140
9141         chv_compute_dpll(crtc, crtc_state);
9142
9143         return 0;
9144 }
9145
9146 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
9147                                   struct intel_crtc_state *crtc_state)
9148 {
9149         int refclk = 100000;
9150         const struct intel_limit *limit = &intel_limits_vlv;
9151         struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
9152
9153         memset(&crtc_state->dpll_hw_state, 0,
9154                sizeof(crtc_state->dpll_hw_state));
9155
9156         if (!crtc_state->clock_set &&
9157             !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9158                                 refclk, NULL, &crtc_state->dpll)) {
9159                 drm_err(&i915->drm,  "Couldn't find PLL settings for mode!\n");
9160                 return -EINVAL;
9161         }
9162
9163         vlv_compute_dpll(crtc, crtc_state);
9164
9165         return 0;
9166 }
9167
9168 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
9169 {
9170         if (IS_I830(dev_priv))
9171                 return false;
9172
9173         return INTEL_GEN(dev_priv) >= 4 ||
9174                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
9175 }
9176
9177 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
9178 {
9179         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9180         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9181         u32 tmp;
9182
9183         if (!i9xx_has_pfit(dev_priv))
9184                 return;
9185
9186         tmp = intel_de_read(dev_priv, PFIT_CONTROL);
9187         if (!(tmp & PFIT_ENABLE))
9188                 return;
9189
9190         /* Check whether the pfit is attached to our pipe. */
9191         if (INTEL_GEN(dev_priv) < 4) {
9192                 if (crtc->pipe != PIPE_B)
9193                         return;
9194         } else {
9195                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
9196                         return;
9197         }
9198
9199         crtc_state->gmch_pfit.control = tmp;
9200         crtc_state->gmch_pfit.pgm_ratios =
9201                 intel_de_read(dev_priv, PFIT_PGM_RATIOS);
9202 }
9203
/*
 * Read back the VLV DPLL dividers and derive the port clock.
 *
 * Unpacks the M1/M2/N/P1/P2 divider fields from the per-pipe DPIO
 * VLV_PLL_DW3 register and stores the resulting clock frequency in
 * pipe_config->port_clock.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        struct dpll clock;
        u32 mdiv;
        int refclk = 100000; /* VLV reference clock in kHz */

        /* In case of DSI, DPLL will not be used */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        vlv_dpio_get(dev_priv);
        mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
        vlv_dpio_put(dev_priv);

        /* Unpack the individual divider fields from the DPIO dword. */
        clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
        clock.m2 = mdiv & DPIO_M2DIV_MASK;
        clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
        clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
        clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

        pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
9230
/*
 * Read out the primary plane's current framebuffer configuration so the
 * BIOS-programmed framebuffer can be inherited at driver load.
 *
 * Allocates an intel_framebuffer describing what the hardware is
 * scanning out (format, tiling, rotation, base, pitch, size) and stores
 * it in plane_config->fb. Returns silently if the plane is disabled or
 * the allocation fails.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *plane = to_intel_plane(crtc->base.primary);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        enum pipe pipe;
        u32 val, base, offset;
        int fourcc, pixel_format;
        unsigned int aligned_height;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;

        /* Nothing to inherit if the plane isn't enabled. */
        if (!plane->get_hw_state(plane, &pipe))
                return;

        drm_WARN_ON(dev, pipe != crtc->pipe);

        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
        if (!intel_fb) {
                drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
                return;
        }

        fb = &intel_fb->base;

        fb->dev = dev;

        val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));

        /* Tiling and 180° rotation bits only exist on gen4+. */
        if (INTEL_GEN(dev_priv) >= 4) {
                if (val & DISPPLANE_TILED) {
                        plane_config->tiling = I915_TILING_X;
                        fb->modifier = I915_FORMAT_MOD_X_TILED;
                }

                if (val & DISPPLANE_ROTATE_180)
                        plane_config->rotation = DRM_MODE_ROTATE_180;
        }

        /* CHV pipe B primary plane additionally supports horizontal mirroring. */
        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
            val & DISPPLANE_MIRROR)
                plane_config->rotation |= DRM_MODE_REFLECT_X;

        pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
        fourcc = i9xx_format_to_fourcc(pixel_format);
        fb->format = drm_format_info(fourcc);

        /* Surface base/offset registers differ per platform generation. */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane));
                base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                if (plane_config->tiling)
                        offset = intel_de_read(dev_priv,
                                               DSPTILEOFF(i9xx_plane));
                else
                        offset = intel_de_read(dev_priv,
                                               DSPLINOFF(i9xx_plane));
                base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
        } else {
                base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
        }
        plane_config->base = base;

        /* PIPESRC holds (width - 1) << 16 | (height - 1). */
        val = intel_de_read(dev_priv, PIPESRC(pipe));
        fb->width = ((val >> 16) & 0xfff) + 1;
        fb->height = ((val >> 0) & 0xfff) + 1;

        val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane));
        fb->pitches[0] = val & 0xffffffc0;

        aligned_height = intel_fb_align_height(fb, 0, fb->height);

        plane_config->size = fb->pitches[0] * aligned_height;

        drm_dbg_kms(&dev_priv->drm,
                    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                    crtc->base.name, plane->base.name, fb->width, fb->height,
                    fb->format->cpp[0] * 8, base, fb->pitches[0],
                    plane_config->size);

        plane_config->fb = intel_fb;
}
9316
/*
 * Read back the CHV DPLL dividers and derive the port clock.
 *
 * Unpacks the M1/M2 (with optional fractional part)/N/P1/P2 dividers
 * from the per-channel DPIO PLL registers and stores the resulting
 * frequency in pipe_config->port_clock.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        struct dpll clock;
        u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
        int refclk = 100000; /* CHV reference clock in kHz */

        /* In case of DSI, DPLL will not be used */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        vlv_dpio_get(dev_priv);
        cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
        pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
        pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
        pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
        pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
        vlv_dpio_put(dev_priv);

        /* M2 is a 22.22 fixed-point value; the integer part comes from DW0. */
        clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
        clock.m2 = (pll_dw0 & 0xff) << 22;
        if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
                clock.m2 |= pll_dw2 & 0x3fffff;
        clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
        clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
        clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

        pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
9350
9351 static enum intel_output_format
9352 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
9353 {
9354         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9355         u32 tmp;
9356
9357         tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
9358
9359         if (tmp & PIPEMISC_YUV420_ENABLE) {
9360                 /* We support 4:2:0 in full blend mode only */
9361                 drm_WARN_ON(&dev_priv->drm,
9362                             (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
9363
9364                 return INTEL_OUTPUT_FORMAT_YCBCR420;
9365         } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
9366                 return INTEL_OUTPUT_FORMAT_YCBCR444;
9367         } else {
9368                 return INTEL_OUTPUT_FORMAT_RGB;
9369         }
9370 }
9371
9372 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
9373 {
9374         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
9375         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9376         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9377         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
9378         u32 tmp;
9379
9380         tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
9381
9382         if (tmp & DISPPLANE_GAMMA_ENABLE)
9383                 crtc_state->gamma_enable = true;
9384
9385         if (!HAS_GMCH(dev_priv) &&
9386             tmp & DISPPLANE_PIPE_CSC_ENABLE)
9387                 crtc_state->csc_enable = true;
9388 }
9389
/*
 * Read out the full hardware state of a GMCH-era pipe into pipe_config.
 *
 * Takes a power-domain reference for the pipe, then captures bpp,
 * color range, gamma/CSC state, timings, pfit, pixel multiplier and
 * DPLL state. Returns true if the pipe is powered and enabled, false
 * otherwise.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;
        u32 tmp;
        bool ret;

        /* Bail early (returning false) if the pipe's power well is off. */
        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wakeref)
                return false;

        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = NULL;

        ret = false;

        tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
        if (!(tmp & PIPECONF_ENABLE))
                goto out;

        /* Only G4X/VLV/CHV expose the pipe bpp in PIPECONF. */
        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
            IS_CHERRYVIEW(dev_priv)) {
                switch (tmp & PIPECONF_BPC_MASK) {
                case PIPECONF_6BPC:
                        pipe_config->pipe_bpp = 18;
                        break;
                case PIPECONF_8BPC:
                        pipe_config->pipe_bpp = 24;
                        break;
                case PIPECONF_10BPC:
                        pipe_config->pipe_bpp = 30;
                        break;
                default:
                        break;
                }
        }

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            (tmp & PIPECONF_COLOR_RANGE_SELECT))
                pipe_config->limited_color_range = true;

        pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
                PIPECONF_GAMMA_MODE_SHIFT;

        if (IS_CHERRYVIEW(dev_priv))
                pipe_config->cgm_mode = intel_de_read(dev_priv,
                                                      CGM_PIPE_MODE(crtc->pipe));

        i9xx_get_pipe_color_config(pipe_config);
        intel_color_get_config(pipe_config);

        if (INTEL_GEN(dev_priv) < 4)
                pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

        intel_get_pipe_timings(crtc, pipe_config);
        intel_get_pipe_src_size(crtc, pipe_config);

        i9xx_get_pfit_config(pipe_config);

        /* Recover the pixel multiplier; its location varies per platform. */
        if (INTEL_GEN(dev_priv) >= 4) {
                /* No way to read it out on pipes B and C */
                if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
                        tmp = dev_priv->chv_dpll_md[crtc->pipe];
                else
                        tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
                         >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
                pipe_config->dpll_hw_state.dpll_md = tmp;
        } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
                   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
                tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & SDVO_MULTIPLIER_MASK)
                         >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
        } else {
                /* Note that on i915G/GM the pixel multiplier is in the sdvo
                 * port and will be fixed up in the encoder->get_config
                 * function. */
                pipe_config->pixel_multiplier = 1;
        }
        pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
                                                        DPLL(crtc->pipe));
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
                pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
                                                               FP0(crtc->pipe));
                pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
                                                               FP1(crtc->pipe));
        } else {
                /* Mask out read-only status bits. */
                pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
                                                     DPLL_PORTC_READY_MASK |
                                                     DPLL_PORTB_READY_MASK);
        }

        if (IS_CHERRYVIEW(dev_priv))
                chv_crtc_clock_get(crtc, pipe_config);
        else if (IS_VALLEYVIEW(dev_priv))
                vlv_crtc_clock_get(crtc, pipe_config);
        else
                i9xx_crtc_clock_get(crtc, pipe_config);

        /*
         * Normally the dotclock is filled in by the encoder .get_config()
         * but in case the pipe is enabled w/o any ports we need a sane
         * default.
         */
        pipe_config->hw.adjusted_mode.crtc_clock =
                pipe_config->port_clock / pipe_config->pixel_multiplier;

        ret = true;

out:
        intel_display_power_put(dev_priv, power_domain, wakeref);

        return ret;
}
9511
/*
 * Configure the Ironlake PCH display reference clock (PCH_DREF_CONTROL).
 *
 * Scans the registered encoders and active PCH DPLLs to determine
 * whether a panel, CPU eDP, CK505 clock chip or an SSC consumer is
 * present, computes the desired final register value, and then applies
 * it in the carefully-ordered multi-step sequence the hardware requires
 * (each source must be enabled/disabled in turn, with settle delays).
 */
static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        int i;
        u32 val, final;
        bool has_lvds = false;
        bool has_cpu_edp = false;
        bool has_panel = false;
        bool has_ck505 = false;
        bool can_ssc = false;
        bool using_ssc_source = false;

        /* We need to take the global config into account */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                switch (encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        has_panel = true;
                        has_lvds = true;
                        break;
                case INTEL_OUTPUT_EDP:
                        has_panel = true;
                        if (encoder->port == PORT_A)
                                has_cpu_edp = true;
                        break;
                default:
                        break;
                }
        }

        /* On IBX an external CK505 clock chip (per VBT) precludes SSC. */
        if (HAS_PCH_IBX(dev_priv)) {
                has_ck505 = dev_priv->vbt.display_clock_mode;
                can_ssc = has_ck505;
        } else {
                has_ck505 = false;
                can_ssc = true;
        }

        /* Check if any DPLLs are using the SSC source */
        for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
                u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));

                if (!(temp & DPLL_VCO_ENABLE))
                        continue;

                if ((temp & PLL_REF_INPUT_MASK) ==
                    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
                        using_ssc_source = true;
                        break;
                }
        }

        drm_dbg_kms(&dev_priv->drm,
                    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
                    has_panel, has_lvds, has_ck505, using_ssc_source);

        /* Ironlake: try to setup display ref clock before DPLL
         * enabling. This is only under driver's control after
         * PCH B stepping, previous chipset stepping should be
         * ignoring this setting.
         */
        val = intel_de_read(dev_priv, PCH_DREF_CONTROL);

        /* As we must carefully and slowly disable/enable each source in turn,
         * compute the final state we want first and check if we need to
         * make any changes at all.
         */
        final = val;
        final &= ~DREF_NONSPREAD_SOURCE_MASK;
        if (has_ck505)
                final |= DREF_NONSPREAD_CK505_ENABLE;
        else
                final |= DREF_NONSPREAD_SOURCE_ENABLE;

        final &= ~DREF_SSC_SOURCE_MASK;
        final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
        final &= ~DREF_SSC1_ENABLE;

        if (has_panel) {
                final |= DREF_SSC_SOURCE_ENABLE;

                if (intel_panel_use_ssc(dev_priv) && can_ssc)
                        final |= DREF_SSC1_ENABLE;

                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc)
                                final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        else
                                final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
        } else if (using_ssc_source) {
                final |= DREF_SSC_SOURCE_ENABLE;
                final |= DREF_SSC1_ENABLE;
        }

        /* Nothing to do if the hardware already matches the wanted state. */
        if (final == val)
                return;

        /* Always enable nonspread source */
        val &= ~DREF_NONSPREAD_SOURCE_MASK;

        if (has_ck505)
                val |= DREF_NONSPREAD_CK505_ENABLE;
        else
                val |= DREF_NONSPREAD_SOURCE_ENABLE;

        if (has_panel) {
                val &= ~DREF_SSC_SOURCE_MASK;
                val |= DREF_SSC_SOURCE_ENABLE;

                /* SSC must be turned on before enabling the CPU output  */
                if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                        drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
                        val |= DREF_SSC1_ENABLE;
                } else
                        val &= ~DREF_SSC1_ENABLE;

                /* Get SSC going before enabling the outputs */
                intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
                intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
                udelay(200);

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Enable CPU source on CPU attached eDP */
                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                                drm_dbg_kms(&dev_priv->drm,
                                            "Using SSC on eDP\n");
                                val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        } else
                                val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
                intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
                udelay(200);
        } else {
                drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Turn off CPU output */
                val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
                intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
                udelay(200);

                /* Keep SSC running if another DPLL still consumes it. */
                if (!using_ssc_source) {
                        drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");

                        /* Turn off the SSC source */
                        val &= ~DREF_SSC_SOURCE_MASK;
                        val |= DREF_SSC_SOURCE_DISABLE;

                        /* Turn off SSC1 */
                        val &= ~DREF_SSC1_ENABLE;

                        intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
                        intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
                        udelay(200);
                }
        }

        /* The stepwise writes must converge on the precomputed final value. */
        BUG_ON(val != final);
}
9680
/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2: assert the reset control
 * bit, wait for the status bit to latch, then de-assert and wait for it
 * to clear. Timeouts are logged but not fatal.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
        u32 tmp;

        tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
        tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
        intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

        if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
                        FDI_MPHY_IOSFSB_RESET_STATUS, 100))
                drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");

        tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
        tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
        intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

        if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
                         FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
                drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}
9701
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers over the sideband interface.
 * The register offsets and magic values come from the workaround above;
 * registers are written in mirrored pairs (0x20xx / 0x21xx) for the two
 * lanes/channels — presumably one per FDI channel, per the Wa — do not
 * reorder or drop writes.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
        u32 tmp;

        tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
        tmp &= ~(0xFF << 24);
        tmp |= (0x12 << 24);
        intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
        tmp &= ~(7 << 13);
        tmp |= (5 << 13);
        intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
        tmp &= ~(7 << 13);
        tmp |= (5 << 13);
        intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
        tmp &= ~0xFF;
        tmp |= 0x1C;
        intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
        tmp &= ~0xFF;
        tmp |= 0x1C;
        intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
        tmp &= ~(0xFF << 16);
        tmp |= (0x1C << 16);
        intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
        tmp &= ~(0xFF << 16);
        tmp |= (0x1C << 16);
        intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
        tmp |= (1 << 27);
        intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
        tmp |= (1 << 27);
        intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
        tmp &= ~(0xF << 28);
        tmp |= (4 << 28);
        intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
        tmp &= ~(0xF << 28);
        tmp |= (4 << 28);
        intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
9776
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
                                 bool with_spread, bool with_fdi)
{
        u32 reg, tmp;

        /* Sanitize impossible parameter combinations before touching hw. */
        if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
                     "FDI requires downspread\n"))
                with_spread = true;
        if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
                     with_fdi, "LP PCH doesn't have FDI\n"))
                with_fdi = false;

        /* Sideband accesses are serialized by sb_lock. */
        mutex_lock(&dev_priv->sb_lock);

        /* Un-gate the SSC block but keep the path in bypass for now. */
        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
        tmp &= ~SBI_SSCCTL_DISABLE;
        tmp |= SBI_SSCCTL_PATHALT;
        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

        udelay(24);

        if (with_spread) {
                /* Release the bypass so the spread clock propagates. */
                tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
                tmp &= ~SBI_SSCCTL_PATHALT;
                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

                if (with_fdi) {
                        lpt_reset_fdi_mphy(dev_priv);
                        lpt_program_fdi_mphy(dev_priv);
                }
        }

        /* The buffer-enable register differs between LPT-LP and LPT-H. */
        reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

        mutex_unlock(&dev_priv->sb_lock);
}
9822
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
        u32 reg, tmp;

        mutex_lock(&dev_priv->sb_lock);

        /* The buffer-enable register differs between LPT-LP and LPT-H. */
        reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
        if (!(tmp & SBI_SSCCTL_DISABLE)) {
                /* Engage the bypass path before gating the SSC block. */
                if (!(tmp & SBI_SSCCTL_PATHALT)) {
                        tmp |= SBI_SSCCTL_PATHALT;
                        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
                        udelay(32);
                }
                tmp |= SBI_SSCCTL_DISABLE;
                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
        }

        mutex_unlock(&dev_priv->sb_lock);
}
9848
/* Map a bend amount in [-50, 50] (units of 5) to a table index. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SBI_SSCDIVINTPHASE values for each supported clock bend amount,
 * indexed by BEND_IDX(steps). Adjacent step values share the same
 * divider/phase setting; the dither phase (programmed separately
 * in lpt_bend_clkout_dp()) distinguishes odd multiples of 5.
 */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
9874
9875 /*
9876  * Bend CLKOUT_DP
9877  * steps -50 to 50 inclusive, in steps of 5
9878  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
9879  * change in clock period = -(steps / 10) * 5.787 ps
9880  */
9881 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
9882 {
9883         u32 tmp;
9884         int idx = BEND_IDX(steps);
9885
9886         if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
9887                 return;
9888
9889         if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
9890                 return;
9891
9892         mutex_lock(&dev_priv->sb_lock);
9893
9894         if (steps % 10 != 0)
9895                 tmp = 0xAAAAAAAB;
9896         else
9897                 tmp = 0x00000000;
9898         intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
9899
9900         tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
9901         tmp &= 0xffff0000;
9902         tmp |= sscdivintphase[idx];
9903         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
9904
9905         mutex_unlock(&dev_priv->sb_lock);
9906 }
9907
9908 #undef BEND_IDX
9909
9910 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
9911 {
9912         u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
9913         u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
9914
9915         if ((ctl & SPLL_PLL_ENABLE) == 0)
9916                 return false;
9917
9918         if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
9919             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9920                 return true;
9921
9922         if (IS_BROADWELL(dev_priv) &&
9923             (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
9924                 return true;
9925
9926         return false;
9927 }
9928
9929 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
9930                                enum intel_dpll_id id)
9931 {
9932         u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
9933         u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
9934
9935         if ((ctl & WRPLL_PLL_ENABLE) == 0)
9936                 return false;
9937
9938         if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
9939                 return true;
9940
9941         if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
9942             (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
9943             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9944                 return true;
9945
9946         return false;
9947 }
9948
9949 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
9950 {
9951         struct intel_encoder *encoder;
9952         bool has_fdi = false;
9953
9954         for_each_intel_encoder(&dev_priv->drm, encoder) {
9955                 switch (encoder->type) {
9956                 case INTEL_OUTPUT_ANALOG:
9957                         has_fdi = true;
9958                         break;
9959                 default:
9960                         break;
9961                 }
9962         }
9963
9964         /*
9965          * The BIOS may have decided to use the PCH SSC
9966          * reference so we must not disable it until the
9967          * relevant PLLs have stopped relying on it. We'll
9968          * just leave the PCH SSC reference enabled in case
9969          * any active PLL is using it. It will get disabled
9970          * after runtime suspend if we don't have FDI.
9971          *
9972          * TODO: Move the whole reference clock handling
9973          * to the modeset sequence proper so that we can
9974          * actually enable/disable/reconfigure these things
9975          * safely. To do that we need to introduce a real
9976          * clock hierarchy. That would also allow us to do
9977          * clock bending finally.
9978          */
9979         dev_priv->pch_ssc_use = 0;
9980
9981         if (spll_uses_pch_ssc(dev_priv)) {
9982                 drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
9983                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
9984         }
9985
9986         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
9987                 drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
9988                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
9989         }
9990
9991         if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
9992                 drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
9993                 dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
9994         }
9995
9996         if (dev_priv->pch_ssc_use)
9997                 return;
9998
9999         if (has_fdi) {
10000                 lpt_bend_clkout_dp(dev_priv, 0);
10001                 lpt_enable_clkout_dp(dev_priv, true, true);
10002         } else {
10003                 lpt_disable_clkout_dp(dev_priv);
10004         }
10005 }
10006
10007 /*
10008  * Initialize reference clocks when the driver loads
10009  */
10010 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
10011 {
10012         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
10013                 ilk_init_pch_refclk(dev_priv);
10014         else if (HAS_PCH_LPT(dev_priv))
10015                 lpt_init_pch_refclk(dev_priv);
10016 }
10017
/* Program PIPECONF for an ILK-style pipe from the committed crtc state. */
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 val;

	val = 0;

	/* Pipe bpp here is limited to exactly these four values. */
	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (crtc_state->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	/*
	 * This would end up with an odd purple hue over
	 * the entire display. Make sure we don't do it.
	 */
	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);

	if (crtc_state->limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	/* Any non-RGB output format is programmed as YUV BT.709 here. */
	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;

	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	val |= PIPECONF_FRAME_START_DELAY(0);

	intel_de_write(dev_priv, PIPECONF(pipe), val);
	/* Posting read to make sure the write lands before continuing. */
	intel_de_posting_read(dev_priv, PIPECONF(pipe));
}
10073
10074 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
10075 {
10076         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
10077         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10078         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
10079         u32 val = 0;
10080
10081         if (IS_HASWELL(dev_priv) && crtc_state->dither)
10082                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
10083
10084         if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
10085                 val |= PIPECONF_INTERLACED_ILK;
10086         else
10087                 val |= PIPECONF_PROGRESSIVE;
10088
10089         if (IS_HASWELL(dev_priv) &&
10090             crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
10091                 val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
10092
10093         intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
10094         intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
10095 }
10096
/* Program PIPEMISC for a BDW+ pipe from the committed crtc state. */
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;

	/* Dither BPC field mirrors the pipe bpp (3 components). */
	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_DITHER_6_BPC;
		break;
	case 24:
		val |= PIPEMISC_DITHER_8_BPC;
		break;
	case 30:
		val |= PIPEMISC_DITHER_10_BPC;
		break;
	case 36:
		val |= PIPEMISC_DITHER_12_BPC;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	/* Both YCbCr 4:2:0 and 4:4:4 output select the YUV colorspace. */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	/* 4:2:0 additionally needs the pipe-level subsampling enabled. */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	/*
	 * ICL+: enable HDR precision mode only when every active plane
	 * (save the cursor) is an HDR-capable plane.
	 */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
					   BIT(PLANE_CURSOR))) == 0)
		val |= PIPEMISC_HDR_MODE_PRECISION;

	if (INTEL_GEN(dev_priv) >= 12)
		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;

	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
10142
10143 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
10144 {
10145         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10146         u32 tmp;
10147
10148         tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
10149
10150         switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
10151         case PIPEMISC_DITHER_6_BPC:
10152                 return 18;
10153         case PIPEMISC_DITHER_8_BPC:
10154                 return 24;
10155         case PIPEMISC_DITHER_10_BPC:
10156                 return 30;
10157         case PIPEMISC_DITHER_12_BPC:
10158                 return 36;
10159         default:
10160                 MISSING_CASE(tmp);
10161                 return 0;
10162         }
10163 }
10164
/* Return the minimum number of FDI lanes for the given mode/link. */
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	unsigned int bps = target_clock * bpp * 21 / 20;
	unsigned int per_lane = link_bw * 8;

	/* Round up: a partially used lane still counts as a whole lane. */
	return (bps + per_lane - 1) / per_lane;
}
10175
10176 static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
10177 {
10178         return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
10179 }
10180
/*
 * Compute the ILK-style DPLL/FP register values for @crtc_state and
 * store them in crtc_state->dpll_hw_state. @reduced_clock, if given,
 * supplies the divider settings for the downclocked (FP1) mode.
 */
static void ilk_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *crtc_state,
			     struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) &&
		     intel_is_dual_link_lvds(dev_priv)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock) {
		factor = 20;
	}

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	/* FP1 carries the reduced-clock dividers, or duplicates FP0. */
	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	/* SDVO/HDMI/DP all need the high speed IO clock. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_NUM_PIPES(dev_priv) == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	/* P2 divider is encoded, not a raw value. */
	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* LVDS panels on SSC use the spread-spectrum reference input. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
10282
/*
 * Find PLL dividers for an ILK-style PCH-attached pipe and reserve a
 * shared DPLL for it. Returns 0 on success or -EINVAL if no suitable
 * divider settings or DPLL could be found.
 */
static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->uapi.state);
	const struct intel_limit *limit;
	int refclk = 120000; /* default reference clock, in kHz */

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	/*
	 * Pick the divider limit table: LVDS has distinct limits for
	 * single vs. dual link and for the 100 MHz SSC reference.
	 */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			drm_dbg_kms(&dev_priv->drm,
				    "using SSC reference clock of %d kHz\n",
				    dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		if (intel_is_dual_link_lvds(dev_priv)) {
			if (refclk == 100000)
				limit = &ilk_limits_dual_lvds_100m;
			else
				limit = &ilk_limits_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &ilk_limits_single_lvds_100m;
			else
				limit = &ilk_limits_single_lvds;
		}
	} else {
		limit = &ilk_limits_dac;
	}

	/* Respect user/connector-forced clocks (clock_set). */
	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		drm_err(&dev_priv->drm,
			"Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ilk_compute_dpll(crtc, crtc_state, NULL);

	if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
		drm_dbg_kms(&dev_priv->drm,
			    "failed to find PLL for pipe %c\n",
			    pipe_name(crtc->pipe));
		return -EINVAL;
	}

	return 0;
}
10341
/* Read back the link/data M/N values from the PCH transcoder registers. */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
	/* DATA_M1 packs the data M value and the TU size together. */
	m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
	/* TU size is stored as (value - 1) in the register. */
	m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
10357
/*
 * Read back link/data M/N values from the CPU transcoder registers.
 * @m2_n2, if non-NULL, additionally receives the M2/N2 set on
 * transcoders that have it. Pre-gen5 hardware uses the per-pipe G4X
 * register layout instead.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = intel_de_read(dev_priv,
					    PIPE_LINK_M1(transcoder));
		m_n->link_n = intel_de_read(dev_priv,
					    PIPE_LINK_N1(transcoder));
		/* DATA_M1 packs the data M value and the TU size together. */
		m_n->gmch_m = intel_de_read(dev_priv,
					    PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv,
					    PIPE_DATA_N1(transcoder));
		/* TU size is stored as (value - 1) in the register. */
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = intel_de_read(dev_priv,
						      PIPE_LINK_M2(transcoder));
			m2_n2->link_n = intel_de_read(dev_priv,
							     PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = intel_de_read(dev_priv,
							     PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = intel_de_read(dev_priv,
							     PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
		m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
10402
10403 void intel_dp_get_m_n(struct intel_crtc *crtc,
10404                       struct intel_crtc_state *pipe_config)
10405 {
10406         if (pipe_config->has_pch_encoder)
10407                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
10408         else
10409                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
10410                                              &pipe_config->dp_m_n,
10411                                              &pipe_config->dp_m2_n2);
10412 }
10413
/* Read back the FDI link M/N values (no M2/N2 set exists for FDI). */
static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
10420
10421 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
10422                                   u32 pos, u32 size)
10423 {
10424         drm_rect_init(&crtc_state->pch_pfit.dst,
10425                       pos >> 16, pos & 0xffff,
10426                       size >> 16, size & 0xffff);
10427 }
10428
/* Read back SKL+ pipe scaler (panel fitter) state from the hardware. */
static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
	int id = -1; /* -1 == no scaler bound to this pipe */
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		u32 ctl, pos, size;

		/* Skip scalers that are off or bound to a plane. */
		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
			continue;

		id = i;
		crtc_state->pch_pfit.enabled = true;

		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));

		ilk_get_pfit_pos_size(crtc_state, pos, size);

		scaler_state->scalers[i].in_use = true;
		/* At most one scaler can be bound to the pipe itself. */
		break;
	}

	scaler_state->scaler_id = id;
	if (id >= 0)
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	else
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
10463
/*
 * Reconstruct the BIOS/GOP-programmed primary plane configuration on
 * SKL+ by reading back the plane registers, so the boot framebuffer
 * can be inherited. On failure the partially built fb is freed and
 * plane_config->fb is left NULL.
 */
static void
skl_get_initial_plane_config(struct intel_crtc *crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to inherit if the plane isn't enabled. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	drm_WARN_ON(dev, pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id));

	/* The pixel format field layout changed on ICL. */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* GLK/gen10+ moved the alpha mode into PLANE_COLOR_CTL. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = intel_de_read(dev_priv,
				      PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/* Map the tiling bits (and any compression) to a format modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		plane_config->tiling = I915_TILING_Y;
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = INTEL_GEN(dev_priv) >= 12 ?
				I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS :
				I915_FORMAT_MOD_Y_TILED_CCS;
		else if (val & PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, that's why this swapping.
	 */
	switch (val & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_0:
		plane_config->rotation = DRM_MODE_ROTATE_0;
		break;
	case PLANE_CTL_ROTATE_90:
		plane_config->rotation = DRM_MODE_ROTATE_270;
		break;
	case PLANE_CTL_ROTATE_180:
		plane_config->rotation = DRM_MODE_ROTATE_180;
		break;
	case PLANE_CTL_ROTATE_270:
		plane_config->rotation = DRM_MODE_ROTATE_90;
		break;
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    val & PLANE_CTL_FLIP_HORIZONTAL)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	/* Surface base address is 4K-aligned; the low bits are flags. */
	base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id));

	/* PLANE_SIZE stores (height - 1) : (width - 1). */
	val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xffff) + 1;
	fb->width = ((val >> 0) & 0xffff) + 1;

	/* Stride register is in units that depend on the modifier. */
	val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id));
	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	drm_dbg_kms(&dev_priv->drm,
		    "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		    crtc->base.name, plane->base.name, fb->width, fb->height,
		    fb->format->cpp[0] * 8, base, fb->pitches[0],
		    plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
10596
/* Read back ILK-style panel fitter state from the hardware. */
static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 ctl, pos, size;

	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
	if ((ctl & PF_ENABLE) == 0)
		return;

	crtc_state->pch_pfit.enabled = true;

	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));

	ilk_get_pfit_pos_size(crtc_state, pos, size);

	/*
	 * We currently do not free assignments of panel fitters on
	 * ivb/hsw (since we don't use the higher upscaling modes which
	 * differentiates them) so just WARN about this case for now.
	 */
	drm_WARN_ON(&dev_priv->drm, IS_GEN(dev_priv, 7) &&
		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
}
10622
10623 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
10624                                 struct intel_crtc_state *pipe_config)
10625 {
10626         struct drm_device *dev = crtc->base.dev;
10627         struct drm_i915_private *dev_priv = to_i915(dev);
10628         enum intel_display_power_domain power_domain;
10629         intel_wakeref_t wakeref;
10630         u32 tmp;
10631         bool ret;
10632
10633         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10634         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10635         if (!wakeref)
10636                 return false;
10637
10638         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
10639         pipe_config->shared_dpll = NULL;
10640
10641         ret = false;
10642         tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
10643         if (!(tmp & PIPECONF_ENABLE))
10644                 goto out;
10645
10646         switch (tmp & PIPECONF_BPC_MASK) {
10647         case PIPECONF_6BPC:
10648                 pipe_config->pipe_bpp = 18;
10649                 break;
10650         case PIPECONF_8BPC:
10651                 pipe_config->pipe_bpp = 24;
10652                 break;
10653         case PIPECONF_10BPC:
10654                 pipe_config->pipe_bpp = 30;
10655                 break;
10656         case PIPECONF_12BPC:
10657                 pipe_config->pipe_bpp = 36;
10658                 break;
10659         default:
10660                 break;
10661         }
10662
10663         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
10664                 pipe_config->limited_color_range = true;
10665
10666         switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
10667         case PIPECONF_OUTPUT_COLORSPACE_YUV601:
10668         case PIPECONF_OUTPUT_COLORSPACE_YUV709:
10669                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
10670                 break;
10671         default:
10672                 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
10673                 break;
10674         }
10675
10676         pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
10677                 PIPECONF_GAMMA_MODE_SHIFT;
10678
10679         pipe_config->csc_mode = intel_de_read(dev_priv,
10680                                               PIPE_CSC_MODE(crtc->pipe));
10681
10682         i9xx_get_pipe_color_config(pipe_config);
10683         intel_color_get_config(pipe_config);
10684
10685         if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
10686                 struct intel_shared_dpll *pll;
10687                 enum intel_dpll_id pll_id;
10688
10689                 pipe_config->has_pch_encoder = true;
10690
10691                 tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
10692                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10693                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
10694
10695                 ilk_get_fdi_m_n_config(crtc, pipe_config);
10696
10697                 if (HAS_PCH_IBX(dev_priv)) {
10698                         /*
10699                          * The pipe->pch transcoder and pch transcoder->pll
10700                          * mapping is fixed.
10701                          */
10702                         pll_id = (enum intel_dpll_id) crtc->pipe;
10703                 } else {
10704                         tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
10705                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
10706                                 pll_id = DPLL_ID_PCH_PLL_B;
10707                         else
10708                                 pll_id= DPLL_ID_PCH_PLL_A;
10709                 }
10710
10711                 pipe_config->shared_dpll =
10712                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
10713                 pll = pipe_config->shared_dpll;
10714
10715                 drm_WARN_ON(dev, !pll->info->funcs->get_hw_state(dev_priv, pll,
10716                                                  &pipe_config->dpll_hw_state));
10717
10718                 tmp = pipe_config->dpll_hw_state.dpll;
10719                 pipe_config->pixel_multiplier =
10720                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
10721                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
10722
10723                 ilk_pch_clock_get(crtc, pipe_config);
10724         } else {
10725                 pipe_config->pixel_multiplier = 1;
10726         }
10727
10728         intel_get_pipe_timings(crtc, pipe_config);
10729         intel_get_pipe_src_size(crtc, pipe_config);
10730
10731         ilk_get_pfit_config(pipe_config);
10732
10733         ret = true;
10734
10735 out:
10736         intel_display_power_put(dev_priv, power_domain, wakeref);
10737
10738         return ret;
10739 }
10740
10741 static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
10742                                   struct intel_crtc_state *crtc_state)
10743 {
10744         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10745         struct intel_atomic_state *state =
10746                 to_intel_atomic_state(crtc_state->uapi.state);
10747
10748         if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
10749             INTEL_GEN(dev_priv) >= 11) {
10750                 struct intel_encoder *encoder =
10751                         intel_get_crtc_new_encoder(state, crtc_state);
10752
10753                 if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
10754                         drm_dbg_kms(&dev_priv->drm,
10755                                     "failed to find PLL for pipe %c\n",
10756                                     pipe_name(crtc->pipe));
10757                         return -EINVAL;
10758                 }
10759         }
10760
10761         return 0;
10762 }
10763
10764 static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
10765                             struct intel_crtc_state *pipe_config)
10766 {
10767         enum intel_dpll_id id;
10768         u32 temp;
10769
10770         temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
10771         id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
10772
10773         if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2))
10774                 return;
10775
10776         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10777 }
10778
/*
 * Read out which shared DPLL feeds @port on ICL+ and record it in
 * pipe_config->icl_port_dplls, marking it as the active port dpll.
 * The register to consult depends on the PHY type (combo vs Type-C).
 */
static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum phy phy = intel_port_to_phy(dev_priv, port);
	enum icl_port_dpll_id port_dpll_id;
	enum intel_dpll_id id;
	u32 temp;

	if (intel_phy_is_combo(dev_priv, phy)) {
		/* Combo PHY: clock select lives in ICL_DPCLKA_CFGCR0 */
		temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) &
			ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
		id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
		port_dpll_id = ICL_PORT_DPLL_DEFAULT;
	} else if (intel_phy_is_tc(dev_priv, phy)) {
		/* Type-C PHY: either the MG PHY PLL or the TBT PLL */
		u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;

		if (clk_sel == DDI_CLK_SEL_MG) {
			id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
								    port));
			port_dpll_id = ICL_PORT_DPLL_MG_PHY;
		} else {
			drm_WARN_ON(&dev_priv->drm,
				    clk_sel < DDI_CLK_SEL_TBT_162);
			id = DPLL_ID_ICL_TBTPLL;
			port_dpll_id = ICL_PORT_DPLL_DEFAULT;
		}
	} else {
		drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port);
		return;
	}

	pipe_config->icl_port_dplls[port_dpll_id].pll =
		intel_get_shared_dpll_by_id(dev_priv, id);

	icl_set_active_port_dpll(pipe_config, port_dpll_id);
}
10815
10816 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
10817                                 enum port port,
10818                                 struct intel_crtc_state *pipe_config)
10819 {
10820         enum intel_dpll_id id;
10821
10822         switch (port) {
10823         case PORT_A:
10824                 id = DPLL_ID_SKL_DPLL0;
10825                 break;
10826         case PORT_B:
10827                 id = DPLL_ID_SKL_DPLL1;
10828                 break;
10829         case PORT_C:
10830                 id = DPLL_ID_SKL_DPLL2;
10831                 break;
10832         default:
10833                 drm_err(&dev_priv->drm, "Incorrect port type\n");
10834                 return;
10835         }
10836
10837         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10838 }
10839
10840 static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
10841                             struct intel_crtc_state *pipe_config)
10842 {
10843         enum intel_dpll_id id;
10844         u32 temp;
10845
10846         temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
10847         id = temp >> (port * 3 + 1);
10848
10849         if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3))
10850                 return;
10851
10852         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10853 }
10854
/*
 * Read out which shared PLL feeds @port on HSW/BDW from PORT_CLK_SEL
 * and store it in pipe_config->shared_dpll. Leaves the state untouched
 * when no clock is selected for the port.
 */
static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		MISSING_CASE(ddi_pll_sel);
		/* fall through */
	case PORT_CLK_SEL_NONE:
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10889
/*
 * Determine which CPU transcoder drives @crtc, handling the special
 * eDP (and, on gen11+, DSI) transcoders whose pipe mapping is not
 * fixed, then grab the corresponding transcoder power domain and
 * report whether the transcoder is enabled.
 *
 * Acquired wakerefs are stored into @wakerefs and recorded in
 * @power_domain_mask so the caller can release them later.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask,
				     intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	intel_wakeref_t wf;
	u32 tmp;

	/* gen11+ also has remappable DSI transcoders */
	if (INTEL_GEN(dev_priv) >= 11)
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
				       panel_transcoder_mask) {
		bool force_thru = false;
		enum pipe trans_pipe;

		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		/* Map the transcoder's EDP input select back to a pipe */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			drm_WARN(dev, 1,
				 "unknown pipe linked to transcoder %s\n",
				 transcoder_name(panel_transcoder));
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
			force_thru = true;
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		case TRANS_DDI_EDP_INPUT_D_ONOFF:
			trans_pipe = PIPE_D;
			break;
		}

		if (trans_pipe == crtc->pipe) {
			pipe_config->cpu_transcoder = panel_transcoder;
			pipe_config->pch_pfit.force_thru = force_thru;
		}
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		    enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));

	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wf)
		return false;

	wakerefs[power_domain] = wf;
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
10986
/*
 * Check whether one of the BXT DSI transcoders is driving @crtc.
 * Grabs the DSI transcoder power domains (recorded in @wakerefs /
 * @power_domain_mask for the caller to release) while probing the
 * hardware.
 *
 * Returns true if a DSI transcoder was found for this crtc, in which
 * case pipe_config->cpu_transcoder is updated accordingly.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask,
					 intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum transcoder cpu_transcoder;
	intel_wakeref_t wf;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));

		wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
		if (!wf)
			continue;

		wakerefs[power_domain] = wf;
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Skip DSI ports routed to some other pipe */
		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
11041
/*
 * Read out which DDI port (and thus which shared DPLL) is driving the
 * transcoder of @pipe_config, including the FDI/PCH encoder state on
 * HSW/BDW.
 */
static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct intel_shared_dpll *pll;
	enum port port;
	u32 tmp;

	if (transcoder_is_dsi(cpu_transcoder)) {
		/* DSI transcoders have a fixed port assignment */
		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
						PORT_A : PORT_B;
	} else {
		tmp = intel_de_read(dev_priv,
				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
		if (INTEL_GEN(dev_priv) >= 12)
			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
		else
			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
	}

	/* Platform-specific port->PLL readout */
	if (INTEL_GEN(dev_priv) >= 11)
		icl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cnl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skl_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		hsw_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		drm_WARN_ON(&dev_priv->drm,
			    !pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder A, which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ilk_get_fdi_m_n_config(crtc, pipe_config);
	}
}
11097
/*
 * Read out the full hardware state of @crtc into @pipe_config on
 * HSW+ platforms.
 *
 * Returns true if the pipe is active. All display power references
 * acquired during the readout are dropped again before returning.
 */
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;
	u32 tmp;

	pipe_config->master_transcoder = INVALID_TRANSCODER;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wf)
		return false;

	wakerefs[power_domain] = wf;
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config,
					  &power_domain_mask, wakerefs);

	/* On BXT/GLK the pipe may instead be driven by a DSI transcoder */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config,
					 &power_domain_mask, wakerefs)) {
		drm_WARN_ON(&dev_priv->drm, active);
		active = true;
	}

	if (!active)
		goto out;

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    INTEL_GEN(dev_priv) >= 11) {
		hsw_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);

	if (IS_HASWELL(dev_priv)) {
		u32 tmp = intel_de_read(dev_priv,
					PIPECONF(pipe_config->cpu_transcoder));

		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
		else
			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	} else {
		pipe_config->output_format =
			bdw_get_pipemisc_output_format(crtc);

		/*
		 * Currently there is no interface defined to
		 * check user preference between RGB/YCBCR444
		 * or YCBCR420. So the only possible case for
		 * YCBCR444 usage is driving YCBCR420 output
		 * with LSPCON, when pipe is configured for
		 * YCBCR444 output and LSPCON takes care of
		 * downsampling it.
		 */
		pipe_config->lspcon_downsampling =
			pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
	}

	pipe_config->gamma_mode = intel_de_read(dev_priv,
						GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = intel_de_read(dev_priv,
					      PIPE_CSC_MODE(crtc->pipe));

	if (INTEL_GEN(dev_priv) >= 9) {
		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		pipe_config->ips_linetime =
			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);

	/* The panel fitter lives in its own power well */
	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	drm_WARN_ON(&dev_priv->drm, power_domain_mask & BIT_ULL(power_domain));

	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wf) {
		wakerefs[power_domain] = wf;
		power_domain_mask |= BIT_ULL(power_domain);

		if (INTEL_GEN(dev_priv) >= 9)
			skl_get_pfit_config(pipe_config);
		else
			ilk_get_pfit_config(pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = intel_de_read(dev_priv,
								 IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			intel_de_read(dev_priv,
				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	/* Drop every power reference taken during readout */
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv,
					power_domain, wakerefs[power_domain]);

	return active;
}
11236
11237 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
11238 {
11239         struct drm_i915_private *dev_priv =
11240                 to_i915(plane_state->uapi.plane->dev);
11241         const struct drm_framebuffer *fb = plane_state->hw.fb;
11242         const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11243         u32 base;
11244
11245         if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
11246                 base = sg_dma_address(obj->mm.pages->sgl);
11247         else
11248                 base = intel_plane_ggtt_offset(plane_state);
11249
11250         return base + plane_state->color_plane[0].offset;
11251 }
11252
11253 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
11254 {
11255         int x = plane_state->uapi.dst.x1;
11256         int y = plane_state->uapi.dst.y1;
11257         u32 pos = 0;
11258
11259         if (x < 0) {
11260                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
11261                 x = -x;
11262         }
11263         pos |= x << CURSOR_X_SHIFT;
11264
11265         if (y < 0) {
11266                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
11267                 y = -y;
11268         }
11269         pos |= y << CURSOR_Y_SHIFT;
11270
11271         return pos;
11272 }
11273
11274 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
11275 {
11276         const struct drm_mode_config *config =
11277                 &plane_state->uapi.plane->dev->mode_config;
11278         int width = drm_rect_width(&plane_state->uapi.dst);
11279         int height = drm_rect_height(&plane_state->uapi.dst);
11280
11281         return width > 0 && width <= config->cursor_width &&
11282                 height > 0 && height <= config->cursor_height;
11283 }
11284
/*
 * Validate and finalize the cursor surface: compute the GTT mapping,
 * verify the fb needs no panning (the cursor hw only takes a base
 * address), and fill in color_plane[0] offset/x/y.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->uapi.plane->dev);
	unsigned int rotation = plane_state->hw.rotation;
	int src_x, src_y;
	u32 offset;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	/* Nothing to map for an invisible plane */
	if (!plane_state->uapi.visible)
		return 0;

	src_x = plane_state->uapi.src.x1 >> 16;
	src_y = plane_state->uapi.src.y1 >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
						    plane_state, 0);

	/* Any x/y remainder after alignment would require panning */
	if (src_x != 0 || src_y != 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate_to(&plane_state->uapi.src,
			      src_x << 16, src_y << 16);

	/* ILK+ do this automagically in hardware */
	if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) {
		const struct drm_framebuffer *fb = plane_state->hw.fb;
		int src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
		int src_h = drm_rect_height(&plane_state->uapi.src) >> 16;

		/* Point at the last pixel so the 180° scanout starts there */
		offset += (src_h * src_w - 1) * fb->format->cpp[0];
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
11336
/*
 * Atomic check for cursor planes: reject tiled framebuffers, run the
 * common plane state checks (no scaling allowed), and validate the
 * cursor surface. Returns 0 on success or a negative error code.
 */
static int intel_check_cursor(struct intel_crtc_state *crtc_state,
			      struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
	int ret;

	if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
		drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
		return -EINVAL;
	}

	ret = drm_atomic_helper_check_plane_state(&plane_state->uapi,
						  &crtc_state->uapi,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Use the unclipped src/dst rectangles, which we program to hw */
	plane_state->uapi.src = drm_plane_state_src(&plane_state->uapi);
	plane_state->uapi.dst = drm_plane_state_dest(&plane_state->uapi);

	ret = intel_cursor_check_surface(plane_state);
	if (ret)
		return ret;

	if (!plane_state->uapi.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	return 0;
}
11374
/*
 * Maximum cursor stride for 845g/865g. The hardware limit is a fixed
 * 2048 (presumably bytes, matching the stride cases accepted by
 * i845_check_cursor() -- TODO confirm against bspec); the format,
 * modifier and rotation parameters are unused on this platform.
 */
static unsigned int
i845_cursor_max_stride(struct intel_plane *plane,
                       u32 pixel_format, u64 modifier,
                       unsigned int rotation)
{
        return 2048;
}
11382
11383 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11384 {
11385         u32 cntl = 0;
11386
11387         if (crtc_state->gamma_enable)
11388                 cntl |= CURSOR_GAMMA_ENABLE;
11389
11390         return cntl;
11391 }
11392
11393 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
11394                            const struct intel_plane_state *plane_state)
11395 {
11396         return CURSOR_ENABLE |
11397                 CURSOR_FORMAT_ARGB |
11398                 CURSOR_STRIDE(plane_state->color_plane[0].stride);
11399 }
11400
11401 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
11402 {
11403         int width = drm_rect_width(&plane_state->uapi.dst);
11404
11405         /*
11406          * 845g/865g are only limited by the width of their cursors,
11407          * the height is arbitrary up to the precision of the register.
11408          */
11409         return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
11410 }
11411
11412 static int i845_check_cursor(struct intel_crtc_state *crtc_state,
11413                              struct intel_plane_state *plane_state)
11414 {
11415         const struct drm_framebuffer *fb = plane_state->hw.fb;
11416         struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
11417         int ret;
11418
11419         ret = intel_check_cursor(crtc_state, plane_state);
11420         if (ret)
11421                 return ret;
11422
11423         /* if we want to turn off the cursor ignore width and height */
11424         if (!fb)
11425                 return 0;
11426
11427         /* Check for which cursor types we support */
11428         if (!i845_cursor_size_ok(plane_state)) {
11429                 drm_dbg_kms(&i915->drm,
11430                             "Cursor dimension %dx%d not supported\n",
11431                             drm_rect_width(&plane_state->uapi.dst),
11432                             drm_rect_height(&plane_state->uapi.dst));
11433                 return -EINVAL;
11434         }
11435
11436         drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
11437                     plane_state->color_plane[0].stride != fb->pitches[0]);
11438
11439         switch (fb->pitches[0]) {
11440         case 256:
11441         case 512:
11442         case 1024:
11443         case 2048:
11444                 break;
11445         default:
11446                  drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
11447                              fb->pitches[0]);
11448                 return -EINVAL;
11449         }
11450
11451         plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
11452
11453         return 0;
11454 }
11455
/*
 * Program the 845g/865g cursor registers from @plane_state, or turn
 * the cursor off when @plane_state is NULL or invisible. Register
 * writes are done under the uncore lock with the _fw accessors.
 */
static void i845_update_cursor(struct intel_plane *plane,
                               const struct intel_crtc_state *crtc_state,
                               const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        u32 cntl = 0, base = 0, pos = 0, size = 0;
        unsigned long irqflags;

        if (plane_state && plane_state->uapi.visible) {
                unsigned int width = drm_rect_width(&plane_state->uapi.dst);
                unsigned int height = drm_rect_height(&plane_state->uapi.dst);

                /* Combine the precomputed plane bits with the crtc bits */
                cntl = plane_state->ctl |
                        i845_cursor_ctl_crtc(crtc_state);

                size = (height << 12) | width;

                base = intel_cursor_base(plane_state);
                pos = intel_cursor_position(plane_state);
        }

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* On these chipsets we can only modify the base/size/stride
         * whilst the cursor is disabled.
         */
        if (plane->cursor.base != base ||
            plane->cursor.size != size ||
            plane->cursor.cntl != cntl) {
                /* Disable first, reprogram, then re-enable with the new cntl */
                intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0);
                intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base);
                intel_de_write_fw(dev_priv, CURSIZE, size);
                intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
                intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl);

                /* Cache what we wrote so identical updates take the fast path */
                plane->cursor.base = base;
                plane->cursor.size = size;
                plane->cursor.cntl = cntl;
        } else {
                /* Only the position changed; no disable/re-enable dance needed */
                intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
        }

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11500
/* Turn the 845g/865g cursor off by programming a NULL plane state. */
static void i845_disable_cursor(struct intel_plane *plane,
                                const struct intel_crtc_state *crtc_state)
{
        i845_update_cursor(plane, crtc_state, NULL);
}
11506
11507 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
11508                                      enum pipe *pipe)
11509 {
11510         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
11511         enum intel_display_power_domain power_domain;
11512         intel_wakeref_t wakeref;
11513         bool ret;
11514
11515         power_domain = POWER_DOMAIN_PIPE(PIPE_A);
11516         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
11517         if (!wakeref)
11518                 return false;
11519
11520         ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE;
11521
11522         *pipe = PIPE_A;
11523
11524         intel_display_power_put(dev_priv, power_domain, wakeref);
11525
11526         return ret;
11527 }
11528
/*
 * Maximum cursor stride for i9xx+: the max cursor width times 4 bytes
 * per pixel (the supported cursor modes are ARGB; see i9xx_cursor_ctl()).
 * The format, modifier and rotation parameters are unused here.
 */
static unsigned int
i9xx_cursor_max_stride(struct intel_plane *plane,
                       u32 pixel_format, u64 modifier,
                       unsigned int rotation)
{
        return plane->base.dev->mode_config.cursor_width * 4;
}
11536
11537 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
11538 {
11539         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
11540         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11541         u32 cntl = 0;
11542
11543         if (INTEL_GEN(dev_priv) >= 11)
11544                 return cntl;
11545
11546         if (crtc_state->gamma_enable)
11547                 cntl = MCURSOR_GAMMA_ENABLE;
11548
11549         if (crtc_state->csc_enable)
11550                 cntl |= MCURSOR_PIPE_CSC_ENABLE;
11551
11552         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
11553                 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
11554
11555         return cntl;
11556 }
11557
/*
 * Plane-dependent cursor control bits for i9xx+: trickle feed disable
 * on snb/ivb, the cursor mode matching the cursor width, and 180
 * degree rotation. Returns 0 for a width no mode exists for (which
 * i9xx_cursor_size_ok() should have rejected already).
 */
static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
                           const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->uapi.plane->dev);
        u32 cntl = 0;

        if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
                cntl |= MCURSOR_TRICKLE_FEED_DISABLE;

        /* The cursor mode is selected by the (power-of-two) cursor width */
        switch (drm_rect_width(&plane_state->uapi.dst)) {
        case 64:
                cntl |= MCURSOR_MODE_64_ARGB_AX;
                break;
        case 128:
                cntl |= MCURSOR_MODE_128_ARGB_AX;
                break;
        case 256:
                cntl |= MCURSOR_MODE_256_ARGB_AX;
                break;
        default:
                /* Should be unreachable; size check happens earlier */
                MISSING_CASE(drm_rect_width(&plane_state->uapi.dst));
                return 0;
        }

        if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
                cntl |= MCURSOR_ROTATE_180;

        return cntl;
}
11588
/*
 * Check the cursor dimensions against the i9xx+ limits: width must be
 * 64/128/256, and the height must either equal the width (square
 * cursor) or, when CUR_FBC_CTL is available and the cursor is not
 * rotated, be anywhere from 8 lines up to the width.
 */
static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->uapi.plane->dev);
        int width = drm_rect_width(&plane_state->uapi.dst);
        int height = drm_rect_height(&plane_state->uapi.dst);

        /* Common (platform-independent) size limits first */
        if (!intel_cursor_size_ok(plane_state))
                return false;

        /* Cursor width is limited to a few power-of-two sizes */
        switch (width) {
        case 256:
        case 128:
        case 64:
                break;
        default:
                return false;
        }

        /*
         * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
         * height from 8 lines up to the cursor width, when the
         * cursor is not rotated. Everything else requires square
         * cursors.
         */
        if (HAS_CUR_FBC(dev_priv) &&
            plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
                if (height < 8 || height > width)
                        return false;
        } else {
                if (height != width)
                        return false;
        }

        return true;
}
11626
/*
 * Validate the cursor plane state for i9xx+: run the common cursor
 * checks, enforce the platform's size and stride restrictions, work
 * around a CHV pipe C hardware issue, and precompute the cursor
 * control register value into plane_state->ctl. Returns 0 on success
 * or a negative error code.
 */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
                             struct intel_plane_state *plane_state)
{
        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        const struct drm_framebuffer *fb = plane_state->hw.fb;
        enum pipe pipe = plane->pipe;
        int ret;

        ret = intel_check_cursor(crtc_state, plane_state);
        if (ret)
                return ret;

        /* if we want to turn off the cursor ignore width and height */
        if (!fb)
                return 0;

        /* Check for which cursor types we support */
        if (!i9xx_cursor_size_ok(plane_state)) {
                drm_dbg(&dev_priv->drm,
                        "Cursor dimension %dx%d not supported\n",
                        drm_rect_width(&plane_state->uapi.dst),
                        drm_rect_height(&plane_state->uapi.dst));
                return -EINVAL;
        }

        /* The computed stride must match what the fb was created with */
        drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
                    plane_state->color_plane[0].stride != fb->pitches[0]);

        /* The fb stride must exactly match the cursor width in bytes */
        if (fb->pitches[0] !=
            drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
                drm_dbg_kms(&dev_priv->drm,
                            "Invalid cursor stride (%u) (cursor width %d)\n",
                            fb->pitches[0],
                            drm_rect_width(&plane_state->uapi.dst));
                return -EINVAL;
        }

        /*
         * There's something wrong with the cursor on CHV pipe C.
         * If it straddles the left edge of the screen then
         * moving it away from the edge or disabling it often
         * results in a pipe underrun, and often that can lead to
         * dead pipe (constant underrun reported, and it scans
         * out just a solid color). To recover from that, the
         * display power well must be turned off and on again.
         * Refuse to put the cursor into that compromised position.
         */
        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
            plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
                drm_dbg_kms(&dev_priv->drm,
                            "CHV cursor C not allowed to straddle the left screen edge\n");
                return -EINVAL;
        }

        plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

        return 0;
}
11686
/*
 * Program the i9xx+ cursor registers from @plane_state, or turn the
 * cursor off when @plane_state is NULL or invisible. The exact
 * register write order matters for how updates get armed; see the
 * comment below. Writes are done under the uncore lock with the _fw
 * accessors.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
                               const struct intel_crtc_state *crtc_state,
                               const struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum pipe pipe = plane->pipe;
        u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
        unsigned long irqflags;

        if (plane_state && plane_state->uapi.visible) {
                unsigned width = drm_rect_width(&plane_state->uapi.dst);
                unsigned height = drm_rect_height(&plane_state->uapi.dst);

                /* Combine the precomputed plane bits with the crtc bits */
                cntl = plane_state->ctl |
                        i9xx_cursor_ctl_crtc(crtc_state);

                /* Non-square cursor: program the height via CUR_FBC_CTL */
                if (width != height)
                        fbc_ctl = CUR_FBC_CTL_EN | (height - 1);

                base = intel_cursor_base(plane_state);
                pos = intel_cursor_position(plane_state);
        }

        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /*
         * On some platforms writing CURCNTR first will also
         * cause CURPOS to be armed by the CURBASE write.
         * Without the CURCNTR write the CURPOS write would
         * arm itself. Thus we always update CURCNTR before
         * CURPOS.
         *
         * On other platforms CURPOS always requires the
         * CURBASE write to arm the update. Additionally
         * a write to any of the cursor register will cancel
         * an already armed cursor update. Thus leaving out
         * the CURBASE write after CURPOS could lead to a
         * cursor that doesn't appear to move, or even change
         * shape. Thus we always write CURBASE.
         *
         * The other registers are armed by the CURBASE write
         * except when the plane is getting enabled at which time
         * the CURCNTR write arms the update.
         */

        if (INTEL_GEN(dev_priv) >= 9)
                skl_write_cursor_wm(plane, crtc_state);

        if (plane->cursor.base != base ||
            plane->cursor.size != fbc_ctl ||
            plane->cursor.cntl != cntl) {
                if (HAS_CUR_FBC(dev_priv))
                        intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe),
                                          fbc_ctl);
                intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
                intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
                intel_de_write_fw(dev_priv, CURBASE(pipe), base);

                /* Cache what we wrote so identical updates take the fast path */
                plane->cursor.base = base;
                plane->cursor.size = fbc_ctl;
                plane->cursor.cntl = cntl;
        } else {
                /* Position-only update; CURBASE still needed to arm it */
                intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
                intel_de_write_fw(dev_priv, CURBASE(pipe), base);
        }

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
11755
/* Turn the i9xx+ cursor off by programming a NULL plane state. */
static void i9xx_disable_cursor(struct intel_plane *plane,
                                const struct intel_crtc_state *crtc_state)
{
        i9xx_update_cursor(plane, crtc_state, NULL);
}
11761
/*
 * Read back whether the i9xx+ cursor is enabled in hardware, and
 * which pipe it is attached to (via @pipe). Returns false if the
 * relevant power domain is not enabled.
 */
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
                                     enum pipe *pipe)
{
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
        intel_wakeref_t wakeref;
        bool ret;
        u32 val;

        /*
         * Not 100% correct for planes that can move between pipes,
         * but that's only the case for gen2-3 which don't have any
         * display power wells.
         */
        power_domain = POWER_DOMAIN_PIPE(plane->pipe);
        wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
        if (!wakeref)
                return false;

        val = intel_de_read(dev_priv, CURCNTR(plane->pipe));

        /* Any non-zero cursor mode means the cursor is enabled */
        ret = val & MCURSOR_MODE;

        /* Pre-ILK (except g4x) reports the pipe in the cursor register */
        if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
                *pipe = plane->pipe;
        else
                *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
                        MCURSOR_PIPE_SELECT_SHIFT;

        intel_display_power_put(dev_priv, power_domain, wakeref);

        return ret;
}
11795
/* VESA 640x480x72Hz mode to set on the pipe during load detection */
static const struct drm_display_mode load_detect_mode = {
        DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
                 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
11801
11802 struct drm_framebuffer *
11803 intel_framebuffer_create(struct drm_i915_gem_object *obj,
11804                          struct drm_mode_fb_cmd2 *mode_cmd)
11805 {
11806         struct intel_framebuffer *intel_fb;
11807         int ret;
11808
11809         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
11810         if (!intel_fb)
11811                 return ERR_PTR(-ENOMEM);
11812
11813         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
11814         if (ret)
11815                 goto err;
11816
11817         return &intel_fb->base;
11818
11819 err:
11820         kfree(intel_fb);
11821         return ERR_PTR(ret);
11822 }
11823
/*
 * Add every plane on @crtc to @state and set each one up to be
 * disabled (no crtc, no fb). Used by the load detect path so the
 * load-detect modeset commits with all planes off. Returns 0 on
 * success or a negative error code.
 */
static int intel_modeset_disable_planes(struct drm_atomic_state *state,
                                        struct drm_crtc *crtc)
{
        struct drm_plane *plane;
        struct drm_plane_state *plane_state;
        int ret, i;

        ret = drm_atomic_add_affected_planes(state, crtc);
        if (ret)
                return ret;

        for_each_new_plane_in_state(state, plane, plane_state, i) {
                /* Only touch planes currently assigned to this crtc */
                if (plane_state->crtc != crtc)
                        continue;

                ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
                if (ret)
                        return ret;

                drm_atomic_set_fb_for_plane(plane_state, NULL);
        }

        return 0;
}
11848
/*
 * Grab a pipe for load detection on @connector: reuse the connector's
 * current CRTC if it already has one, otherwise pick the first unused
 * CRTC the encoder can drive, then commit a 640x480 load-detect mode
 * with all planes disabled. The atomic state needed to undo all this
 * is stashed in @old->restore_state for
 * intel_release_load_detect_pipe().
 *
 * NOTE(review): despite the int return type this returns true (1) on
 * success, false (0) on failure, and -EDEADLK when the acquire ctx
 * needs backoff -- callers must handle all three.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
                               struct intel_load_detect_pipe *old,
                               struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_crtc *intel_crtc;
        struct intel_encoder *intel_encoder =
                intel_attached_encoder(to_intel_connector(connector));
        struct drm_crtc *possible_crtc;
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_crtc *crtc = NULL;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_mode_config *config = &dev->mode_config;
        struct drm_atomic_state *state = NULL, *restore_state = NULL;
        struct drm_connector_state *connector_state;
        struct intel_crtc_state *crtc_state;
        int ret, i = -1;

        drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                    connector->base.id, connector->name,
                    encoder->base.id, encoder->name);

        old->restore_state = NULL;

        drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));

        /*
         * Algorithm gets a little messy:
         *
         *   - if the connector already has an assigned crtc, use it (but make
         *     sure it's on first)
         *
         *   - try to find the first unused crtc that can drive this connector,
         *     and use that if we find one
         */

        /* See if we already have a CRTC for this connector */
        if (connector->state->crtc) {
                crtc = connector->state->crtc;

                ret = drm_modeset_lock(&crtc->mutex, ctx);
                if (ret)
                        goto fail;

                /* Make sure the crtc and connector are running */
                goto found;
        }

        /* Find an unused one (if possible) */
        for_each_crtc(dev, possible_crtc) {
                i++;
                /* Skip crtcs this encoder cannot drive */
                if (!(encoder->possible_crtcs & (1 << i)))
                        continue;

                ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
                if (ret)
                        goto fail;

                /* Skip crtcs that are already in use */
                if (possible_crtc->state->enable) {
                        drm_modeset_unlock(&possible_crtc->mutex);
                        continue;
                }

                crtc = possible_crtc;
                break;
        }

        /*
         * If we didn't find an unused CRTC, don't use any.
         */
        if (!crtc) {
                drm_dbg_kms(&dev_priv->drm,
                            "no pipe available for load-detect\n");
                ret = -ENODEV;
                goto fail;
        }

found:
        intel_crtc = to_intel_crtc(crtc);

        /* One state to commit the load-detect mode, one to restore later */
        state = drm_atomic_state_alloc(dev);
        restore_state = drm_atomic_state_alloc(dev);
        if (!state || !restore_state) {
                ret = -ENOMEM;
                goto fail;
        }

        state->acquire_ctx = ctx;
        restore_state->acquire_ctx = ctx;

        connector_state = drm_atomic_get_connector_state(state, connector);
        if (IS_ERR(connector_state)) {
                ret = PTR_ERR(connector_state);
                goto fail;
        }

        ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
        if (ret)
                goto fail;

        crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
        if (IS_ERR(crtc_state)) {
                ret = PTR_ERR(crtc_state);
                goto fail;
        }

        crtc_state->uapi.active = true;

        ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
                                           &load_detect_mode);
        if (ret)
                goto fail;

        /* Commit with all planes off so only the load-detect mode runs */
        ret = intel_modeset_disable_planes(state, crtc);
        if (ret)
                goto fail;

        /* Snapshot the current state so we can restore it afterwards */
        ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
        if (!ret)
                ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
        if (!ret)
                ret = drm_atomic_add_affected_planes(restore_state, crtc);
        if (ret) {
                drm_dbg_kms(&dev_priv->drm,
                            "Failed to create a copy of old state to restore: %i\n",
                            ret);
                goto fail;
        }

        ret = drm_atomic_commit(state);
        if (ret) {
                drm_dbg_kms(&dev_priv->drm,
                            "failed to set mode on load-detect pipe\n");
                goto fail;
        }

        old->restore_state = restore_state;
        drm_atomic_state_put(state);

        /* let the connector get through one full cycle before testing */
        intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
        return true;

fail:
        if (state) {
                drm_atomic_state_put(state);
                state = NULL;
        }
        if (restore_state) {
                drm_atomic_state_put(restore_state);
                restore_state = NULL;
        }

        /* -EDEADLK means the caller must back off and retry */
        if (ret == -EDEADLK)
                return ret;

        return false;
}
12007
/*
 * Undo intel_get_load_detect_pipe() by committing the restore state
 * saved in @old. A NULL restore state (load detect never took the
 * pipe) is a no-op. Commit failures are only logged; there is no way
 * to report them to the caller.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
                                    struct intel_load_detect_pipe *old,
                                    struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_encoder *intel_encoder =
                intel_attached_encoder(to_intel_connector(connector));
        struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_atomic_state *state = old->restore_state;
        int ret;

        drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                    connector->base.id, connector->name,
                    encoder->base.id, encoder->name);

        if (!state)
                return;

        ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
        if (ret)
                drm_dbg_kms(&i915->drm,
                            "Couldn't release load detect pipe: %i\n", ret);
        drm_atomic_state_put(state);
}
12032
12033 static int i9xx_pll_refclk(struct drm_device *dev,
12034                            const struct intel_crtc_state *pipe_config)
12035 {
12036         struct drm_i915_private *dev_priv = to_i915(dev);
12037         u32 dpll = pipe_config->dpll_hw_state.dpll;
12038
12039         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
12040                 return dev_priv->vbt.lvds_ssc_freq;
12041         else if (HAS_PCH_SPLIT(dev_priv))
12042                 return 120000;
12043         else if (!IS_GEN(dev_priv, 2))
12044                 return 96000;
12045         else
12046                 return 48000;
12047 }
12048
/*
 * Returns the clock of the currently programmed mode of the given
 * pipe, by decoding the DPLL/FP register values saved in
 * pipe_config->dpll_hw_state back into M/N/P divisors and running
 * them through the platform's dpll calculation. The result is stored
 * in pipe_config->port_clock.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        u32 dpll = pipe_config->dpll_hw_state.dpll;
        u32 fp;
        struct dpll clock;
        int port_clock;
        int refclk = i9xx_pll_refclk(dev, pipe_config);

        /* Pick whichever FP register the DPLL is currently using */
        if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
                fp = pipe_config->dpll_hw_state.fp0;
        else
                fp = pipe_config->dpll_hw_state.fp1;

        /* Decode the M1/N/M2 divisors (Pineview encodes N and M2 differently) */
        clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
        if (IS_PINEVIEW(dev_priv)) {
                clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
                clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
        } else {
                clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
                clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
        }

        if (!IS_GEN(dev_priv, 2)) {
                /* gen3+: P1 is one-hot encoded in the DPLL register */
                if (IS_PINEVIEW(dev_priv))
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
                                DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
                else
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
                               DPLL_FPA01_P1_POST_DIV_SHIFT);

                /* P2 depends on the DPLL operating mode */
                switch (dpll & DPLL_MODE_MASK) {
                case DPLLB_MODE_DAC_SERIAL:
                        clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
                                5 : 10;
                        break;
                case DPLLB_MODE_LVDS:
                        clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
                                7 : 14;
                        break;
                default:
                        drm_dbg_kms(&dev_priv->drm,
                                    "Unknown DPLL mode %08x in programmed "
                                    "mode\n", (int)(dpll & DPLL_MODE_MASK));
                        return;
                }

                if (IS_PINEVIEW(dev_priv))
                        port_clock = pnv_calc_dpll_params(refclk, &clock);
                else
                        port_clock = i9xx_calc_dpll_params(refclk, &clock);
        } else {
                /* gen2: divisor encoding depends on whether LVDS is driving pipe B */
                u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
                                                                 LVDS);
                bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

                if (is_lvds) {
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
                                       DPLL_FPA01_P1_POST_DIV_SHIFT);

                        if (lvds & LVDS_CLKB_POWER_UP)
                                clock.p2 = 7;
                        else
                                clock.p2 = 14;
                } else {
                        if (dpll & PLL_P1_DIVIDE_BY_TWO)
                                clock.p1 = 2;
                        else {
                                clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
                                            DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
                        }
                        if (dpll & PLL_P2_DIVIDE_BY_4)
                                clock.p2 = 4;
                        else
                                clock.p2 = 2;
                }

                port_clock = i9xx_calc_dpll_params(refclk, &clock);
        }

        /*
         * This value includes pixel_multiplier. We will use
         * port_clock to compute adjusted_mode.crtc_clock in the
         * encoder's get_config() function.
         */
        pipe_config->port_clock = port_clock;
}
12140
12141 int intel_dotclock_calculate(int link_freq,
12142                              const struct intel_link_m_n *m_n)
12143 {
12144         /*
12145          * The calculation for the data clock is:
12146          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
12147          * But we want to avoid losing precison if possible, so:
12148          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
12149          *
12150          * and the link clock is simpler:
12151          * link_clock = (m * link_clock) / n
12152          */
12153
12154         if (!m_n->link_n)
12155                 return 0;
12156
12157         return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
12158 }
12159
12160 static void ilk_pch_clock_get(struct intel_crtc *crtc,
12161                               struct intel_crtc_state *pipe_config)
12162 {
12163         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12164
12165         /* read out port_clock from the DPLL */
12166         i9xx_crtc_clock_get(crtc, pipe_config);
12167
12168         /*
12169          * In case there is an active pipe without active ports,
12170          * we may need some idea for the dotclock anyway.
12171          * Calculate one based on the FDI configuration.
12172          */
12173         pipe_config->hw.adjusted_mode.crtc_clock =
12174                 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
12175                                          &pipe_config->fdi_m_n);
12176 }
12177
12178 static void intel_crtc_state_reset(struct intel_crtc_state *crtc_state,
12179                                    struct intel_crtc *crtc)
12180 {
12181         memset(crtc_state, 0, sizeof(*crtc_state));
12182
12183         __drm_atomic_helper_crtc_state_reset(&crtc_state->uapi, &crtc->base);
12184
12185         crtc_state->cpu_transcoder = INVALID_TRANSCODER;
12186         crtc_state->master_transcoder = INVALID_TRANSCODER;
12187         crtc_state->hsw_workaround_pipe = INVALID_PIPE;
12188         crtc_state->output_format = INTEL_OUTPUT_FORMAT_INVALID;
12189         crtc_state->scaler_state.scaler_id = -1;
12190         crtc_state->mst_master_transcoder = INVALID_TRANSCODER;
12191 }
12192
12193 static struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc)
12194 {
12195         struct intel_crtc_state *crtc_state;
12196
12197         crtc_state = kmalloc(sizeof(*crtc_state), GFP_KERNEL);
12198
12199         if (crtc_state)
12200                 intel_crtc_state_reset(crtc_state, crtc);
12201
12202         return crtc_state;
12203 }
12204
12205 /* Returns the currently programmed mode of the given encoder. */
12206 struct drm_display_mode *
12207 intel_encoder_current_mode(struct intel_encoder *encoder)
12208 {
12209         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
12210         struct intel_crtc_state *crtc_state;
12211         struct drm_display_mode *mode;
12212         struct intel_crtc *crtc;
12213         enum pipe pipe;
12214
12215         if (!encoder->get_hw_state(encoder, &pipe))
12216                 return NULL;
12217
12218         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12219
12220         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
12221         if (!mode)
12222                 return NULL;
12223
12224         crtc_state = intel_crtc_state_alloc(crtc);
12225         if (!crtc_state) {
12226                 kfree(mode);
12227                 return NULL;
12228         }
12229
12230         if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
12231                 kfree(crtc_state);
12232                 kfree(mode);
12233                 return NULL;
12234         }
12235
12236         encoder->get_config(encoder, crtc_state);
12237
12238         intel_mode_from_pipe_config(mode, crtc_state);
12239
12240         kfree(crtc_state);
12241
12242         return mode;
12243 }
12244
/* drm_crtc_funcs.destroy: tear down core state, then free our container. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(to_intel_crtc(crtc));
}
12252
12253 /**
12254  * intel_wm_need_update - Check whether watermarks need updating
12255  * @cur: current plane state
12256  * @new: new plane state
12257  *
12258  * Check current plane state versus the new one to determine whether
12259  * watermarks need to be recalculated.
12260  *
12261  * Returns true or false.
12262  */
12263 static bool intel_wm_need_update(const struct intel_plane_state *cur,
12264                                  struct intel_plane_state *new)
12265 {
12266         /* Update watermarks on tiling or size changes. */
12267         if (new->uapi.visible != cur->uapi.visible)
12268                 return true;
12269
12270         if (!cur->hw.fb || !new->hw.fb)
12271                 return false;
12272
12273         if (cur->hw.fb->modifier != new->hw.fb->modifier ||
12274             cur->hw.rotation != new->hw.rotation ||
12275             drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
12276             drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
12277             drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
12278             drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
12279                 return true;
12280
12281         return false;
12282 }
12283
12284 static bool needs_scaling(const struct intel_plane_state *state)
12285 {
12286         int src_w = drm_rect_width(&state->uapi.src) >> 16;
12287         int src_h = drm_rect_height(&state->uapi.src) >> 16;
12288         int dst_w = drm_rect_width(&state->uapi.dst);
12289         int dst_h = drm_rect_height(&state->uapi.dst);
12290
12291         return (src_w != dst_w || src_h != dst_h);
12292 }
12293
/**
 * intel_plane_atomic_calc_changes - compute crtc-level effects of a plane update
 * @old_crtc_state: crtc state before this commit
 * @crtc_state: new crtc state, updated in place with the derived flags
 * @old_plane_state: plane state before this commit
 * @plane_state: new plane state being checked
 *
 * Derives the crtc-level bookkeeping this plane update requires:
 * watermark recompute flags (update_wm_pre/post), cxsr and LP watermark
 * disabling, frontbuffer bits, and (gen9+) per-plane scaler setup.
 *
 * Returns 0 on success or a negative error code from the scaler setup.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->hw.active;
	bool is_crtc_enabled = crtc_state->hw.active;
	bool turn_off, turn_on, visible, was_visible;
	int ret;

	/* gen9+: non-cursor planes may need a pipe scaler assigned/released. */
	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->uapi.visible;
	visible = plane_state->uapi.visible;

	/* A plane can't have been visible on an inactive crtc. */
	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		intel_plane_set_invisible(crtc_state, plane_state);
		visible = false;
	}

	/* Invisible before and after: nothing to account for. */
	if (!was_visible && !visible)
		return 0;

	/* A full modeset implies the plane is toggled off and back on. */
	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	drm_dbg_atomic(&dev_priv->drm,
		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
		       crtc->base.base.id, crtc->base.name,
		       plane->base.base.id, plane->base.name,
		       was_visible, visible,
		       turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			crtc_state->update_wm_pre = true;
			crtc_state->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(plane_state))))
		crtc_state->disable_lp_wm = true;

	return 0;
}
12415
12416 static bool encoders_cloneable(const struct intel_encoder *a,
12417                                const struct intel_encoder *b)
12418 {
12419         /* masks could be asymmetric, so check both ways */
12420         return a == b || (a->cloneable & (1 << b->type) &&
12421                           b->cloneable & (1 << a->type));
12422 }
12423
12424 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
12425                                          struct intel_crtc *crtc,
12426                                          struct intel_encoder *encoder)
12427 {
12428         struct intel_encoder *source_encoder;
12429         struct drm_connector *connector;
12430         struct drm_connector_state *connector_state;
12431         int i;
12432
12433         for_each_new_connector_in_state(state, connector, connector_state, i) {
12434                 if (connector_state->crtc != &crtc->base)
12435                         continue;
12436
12437                 source_encoder =
12438                         to_intel_encoder(connector_state->best_encoder);
12439                 if (!encoders_cloneable(encoder, source_encoder))
12440                         return false;
12441         }
12442
12443         return true;
12444 }
12445
/*
 * Pull the Y-plane half of every planar YUV (master/slave) plane pair
 * into the atomic state so both planes get reprogrammed together, and
 * sanity check that the links are symmetric.
 */
static int icl_add_linked_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state, *linked_plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		linked = plane_state->planar_linked_plane;

		if (!linked)
			continue;

		/* Adding the linked plane's state makes it part of this commit. */
		linked_plane_state = intel_atomic_get_plane_state(state, linked);
		if (IS_ERR(linked_plane_state))
			return PTR_ERR(linked_plane_state);

		/* Links must point back at each other... */
		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_linked_plane != plane);
		/* ...and exactly one side of the pair is the slave. */
		drm_WARN_ON(state->base.dev,
			    linked_plane_state->planar_slave == plane_state->planar_slave);
	}

	return 0;
}
12470
/*
 * On gen11+ a planar (NV12) plane needs a second plane to scan out the
 * Y component. Rebuild the master/slave plane links for @crtc_state:
 * first tear down all stale links, then assign a free Y-capable plane
 * to each plane in the nv12_planes mask and copy the relevant hw
 * parameters over to it.
 *
 * Returns 0 on success, -EINVAL if not enough Y planes are free, or a
 * negative error code from acquiring plane states.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
			continue;

		plane_state->planar_linked_plane = NULL;
		if (plane_state->planar_slave && !plane_state->uapi.visible) {
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->planar_slave = false;
	}

	/* No planar planes this commit: nothing left to link. */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Find a free (inactive) Y-capable plane on this crtc. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			drm_dbg_kms(&dev_priv->drm,
				    "Need %d free Y planes for planar YUV\n",
				    hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->planar_linked_plane = linked;

		linked_state->planar_slave = true;
		linked_state->planar_linked_plane = plane;
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
			    linked->base.name, plane->base.name);

		/* Copy parameters to slave plane */
		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
		linked_state->color_ctl = plane_state->color_ctl;
		linked_state->view = plane_state->view;
		memcpy(linked_state->color_plane, plane_state->color_plane,
		       sizeof(linked_state->color_plane));

		intel_plane_copy_uapi_to_hw_state(linked_state, plane_state);
		linked_state->uapi.src = plane_state->uapi.src;
		linked_state->uapi.dst = plane_state->uapi.dst;

		/* HDR planes additionally select the chroma upsampler source. */
		if (icl_is_hdr_plane(dev_priv, plane->id)) {
			if (linked->id == PLANE_SPRITE5)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
			else if (linked->id == PLANE_SPRITE4)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
			else if (linked->id == PLANE_SPRITE3)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
			else if (linked->id == PLANE_SPRITE2)
				plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
			else
				MISSING_CASE(linked->id);
		}
	}

	return 0;
}
12568
12569 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
12570 {
12571         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
12572         struct intel_atomic_state *state =
12573                 to_intel_atomic_state(new_crtc_state->uapi.state);
12574         const struct intel_crtc_state *old_crtc_state =
12575                 intel_atomic_get_old_crtc_state(state, crtc);
12576
12577         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
12578 }
12579
12580 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
12581 {
12582         const struct drm_display_mode *adjusted_mode =
12583                 &crtc_state->hw.adjusted_mode;
12584
12585         if (!crtc_state->hw.enable)
12586                 return 0;
12587
12588         return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
12589                                  adjusted_mode->crtc_clock);
12590 }
12591
12592 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
12593                                const struct intel_cdclk_state *cdclk_state)
12594 {
12595         const struct drm_display_mode *adjusted_mode =
12596                 &crtc_state->hw.adjusted_mode;
12597
12598         if (!crtc_state->hw.enable)
12599                 return 0;
12600
12601         return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
12602                                  cdclk_state->logical.cdclk);
12603 }
12604
12605 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
12606 {
12607         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12608         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12609         const struct drm_display_mode *adjusted_mode =
12610                 &crtc_state->hw.adjusted_mode;
12611         u16 linetime_wm;
12612
12613         if (!crtc_state->hw.enable)
12614                 return 0;
12615
12616         linetime_wm = DIV_ROUND_UP(adjusted_mode->crtc_htotal * 1000 * 8,
12617                                    crtc_state->pixel_rate);
12618
12619         /* Display WA #1135: BXT:ALL GLK:ALL */
12620         if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
12621                 linetime_wm /= 2;
12622
12623         return linetime_wm;
12624 }
12625
12626 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
12627                                    struct intel_crtc *crtc)
12628 {
12629         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12630         struct intel_crtc_state *crtc_state =
12631                 intel_atomic_get_new_crtc_state(state, crtc);
12632         const struct intel_cdclk_state *cdclk_state;
12633
12634         if (INTEL_GEN(dev_priv) >= 9)
12635                 crtc_state->linetime = skl_linetime_wm(crtc_state);
12636         else
12637                 crtc_state->linetime = hsw_linetime_wm(crtc_state);
12638
12639         if (!hsw_crtc_supports_ips(crtc))
12640                 return 0;
12641
12642         cdclk_state = intel_atomic_get_cdclk_state(state);
12643         if (IS_ERR(cdclk_state))
12644                 return PTR_ERR(cdclk_state);
12645
12646         crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
12647                                                        cdclk_state);
12648
12649         return 0;
12650 }
12651
/*
 * Per-crtc atomic check: computes derived crtc state (clocks, color
 * management, watermarks, scalers, IPS, linetime) for @crtc in the new
 * atomic state. Returns 0 on success or a negative error code from any
 * of the sub-checks.
 */
static int intel_crtc_atomic_check(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool mode_changed = needs_modeset(crtc_state);
	int ret;

	/* Pre-g4x: force a post-update wm recompute when turning the crtc off. */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	/* A DPLL must not already be assigned when computing a new clock. */
	if (mode_changed && crtc_state->hw.enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(crtc_state))
		crtc_state->uapi.color_mgmt_changed = true;

	if (mode_changed || crtc_state->update_pipe ||
	    crtc_state->uapi.color_mgmt_changed) {
		ret = intel_color_check(crtc_state);
		if (ret)
			return ret;
	}

	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(crtc_state);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm) {
		/* Intermediate wm requires pipe wm to have been computed. */
		if (drm_WARN_ON(&dev_priv->drm,
				!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
		if (ret) {
			drm_dbg_kms(&dev_priv->drm,
				    "No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed || crtc_state->update_pipe) {
			ret = skl_update_scaler_crtc(crtc_state);
			if (ret)
				return ret;
		}

		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
		if (ret)
			return ret;
	}

	if (HAS_IPS(dev_priv)) {
		ret = hsw_compute_ips_config(crtc_state);
		if (ret)
			return ret;
	}

	if (INTEL_GEN(dev_priv) >= 9 ||
	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
		ret = hsw_compute_linetime_wm(state, crtc);
		if (ret)
			return ret;

	}

	return 0;
}
12742
/*
 * Sync every connector's atomic state (best_encoder/crtc) with the
 * legacy connector->encoder pointers, adjusting connector references
 * to match the new crtc binding.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Drop the reference held for the previous crtc binding. */
		if (connector->base.state->crtc)
			drm_connector_put(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* Bound to a crtc again: take a new reference. */
			drm_connector_get(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
12767
12768 static int
12769 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
12770                       struct intel_crtc_state *pipe_config)
12771 {
12772         struct drm_connector *connector = conn_state->connector;
12773         struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
12774         const struct drm_display_info *info = &connector->display_info;
12775         int bpp;
12776
12777         switch (conn_state->max_bpc) {
12778         case 6 ... 7:
12779                 bpp = 6 * 3;
12780                 break;
12781         case 8 ... 9:
12782                 bpp = 8 * 3;
12783                 break;
12784         case 10 ... 11:
12785                 bpp = 10 * 3;
12786                 break;
12787         case 12:
12788                 bpp = 12 * 3;
12789                 break;
12790         default:
12791                 return -EINVAL;
12792         }
12793
12794         if (bpp < pipe_config->pipe_bpp) {
12795                 drm_dbg_kms(&i915->drm,
12796                             "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
12797                             "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
12798                             connector->base.id, connector->name,
12799                             bpp, 3 * info->bpc,
12800                             3 * conn_state->max_requested_bpc,
12801                             pipe_config->pipe_bpp);
12802
12803                 pipe_config->pipe_bpp = bpp;
12804         }
12805
12806         return 0;
12807 }
12808
12809 static int
12810 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12811                           struct intel_crtc_state *pipe_config)
12812 {
12813         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12814         struct drm_atomic_state *state = pipe_config->uapi.state;
12815         struct drm_connector *connector;
12816         struct drm_connector_state *connector_state;
12817         int bpp, i;
12818
12819         if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
12820             IS_CHERRYVIEW(dev_priv)))
12821                 bpp = 10*3;
12822         else if (INTEL_GEN(dev_priv) >= 5)
12823                 bpp = 12*3;
12824         else
12825                 bpp = 8*3;
12826
12827         pipe_config->pipe_bpp = bpp;
12828
12829         /* Clamp display bpp to connector max bpp */
12830         for_each_new_connector_in_state(state, connector, connector_state, i) {
12831                 int ret;
12832
12833                 if (connector_state->crtc != &crtc->base)
12834                         continue;
12835
12836                 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
12837                 if (ret)
12838                         return ret;
12839         }
12840
12841         return 0;
12842 }
12843
/* Dump the crtc_* timing fields of @mode to the KMS debug log. */
static void intel_dump_crtc_timings(struct drm_i915_private *i915,
				    const struct drm_display_mode *mode)
{
	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
		    "type: 0x%x flags: 0x%x\n",
		    mode->crtc_clock,
		    mode->crtc_hdisplay, mode->crtc_hsync_start,
		    mode->crtc_hsync_end, mode->crtc_htotal,
		    mode->crtc_vdisplay, mode->crtc_vsync_start,
		    mode->crtc_vsync_end, mode->crtc_vtotal,
		    mode->type, mode->flags);
}
12856
/* Dump a link M/N configuration, labelled @id, to the KMS debug log. */
static void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);

	drm_dbg_kms(&i915->drm,
		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		    id, lane_count,
		    m_n->gmch_m, m_n->gmch_n,
		    m_n->link_m, m_n->link_n, m_n->tu);
}
12870
12871 static void
12872 intel_dump_infoframe(struct drm_i915_private *dev_priv,
12873                      const union hdmi_infoframe *frame)
12874 {
12875         if (!drm_debug_enabled(DRM_UT_KMS))
12876                 return;
12877
12878         hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
12879 }
12880
12881 static void
12882 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
12883                       const struct drm_dp_vsc_sdp *vsc)
12884 {
12885         if (!drm_debug_enabled(DRM_UT_KMS))
12886                 return;
12887
12888         drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
12889 }
12890
/* Expand INTEL_OUTPUT_<x> into a designated initializer naming it "x". */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/*
 * Printable names for intel_output_type values, indexed by the enum
 * value itself; consumed by snprintf_output_types() below.
 */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
12909
12910 static void snprintf_output_types(char *buf, size_t len,
12911                                   unsigned int output_types)
12912 {
12913         char *str = buf;
12914         int i;
12915
12916         str[0] = '\0';
12917
12918         for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
12919                 int r;
12920
12921                 if ((output_types & BIT(i)) == 0)
12922                         continue;
12923
12924                 r = snprintf(str, len, "%s%s",
12925                              str != buf ? "," : "", output_type_str[i]);
12926                 if (r >= len)
12927                         break;
12928                 str += r;
12929                 len -= r;
12930
12931                 output_types &= ~BIT(i);
12932         }
12933
12934         WARN_ON_ONCE(output_types != 0);
12935 }
12936
/*
 * Printable names for enum intel_output_format values, indexed by value.
 * Looked up via output_formats(), which range-checks the index.
 */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
12943
12944 static const char *output_formats(enum intel_output_format format)
12945 {
12946         if (format >= ARRAY_SIZE(output_format_str))
12947                 format = INTEL_OUTPUT_FORMAT_INVALID;
12948         return output_format_str[format];
12949 }
12950
12951 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
12952 {
12953         struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
12954         struct drm_i915_private *i915 = to_i915(plane->base.dev);
12955         const struct drm_framebuffer *fb = plane_state->hw.fb;
12956         struct drm_format_name_buf format_name;
12957
12958         if (!fb) {
12959                 drm_dbg_kms(&i915->drm,
12960                             "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
12961                             plane->base.base.id, plane->base.name,
12962                             yesno(plane_state->uapi.visible));
12963                 return;
12964         }
12965
12966         drm_dbg_kms(&i915->drm,
12967                     "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
12968                     plane->base.base.id, plane->base.name,
12969                     fb->base.id, fb->width, fb->height,
12970                     drm_get_format_name(fb->format->format, &format_name),
12971                     yesno(plane_state->uapi.visible));
12972         drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
12973                     plane_state->hw.rotation, plane_state->scaler_id);
12974         if (plane_state->uapi.visible)
12975                 drm_dbg_kms(&i915->drm,
12976                             "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
12977                             DRM_RECT_FP_ARG(&plane_state->uapi.src),
12978                             DRM_RECT_ARG(&plane_state->uapi.dst));
12979 }
12980
/*
 * Dump the full contents of a crtc state to the KMS debug log: output
 * types/format, transcoder, link M/N values, infoframes, mode timings,
 * panel fitter, scaler and color state, plus the state of every plane
 * on this pipe. @context is a short caller-supplied tag identifying
 * why the dump was made. @state may be NULL, in which case the plane
 * states are not dumped.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
		    crtc->base.base.id, crtc->base.name,
		    yesno(pipe_config->hw.enable), context);

	/* A disabled crtc has no interesting config; only dump the planes. */
	if (!pipe_config->hw.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	drm_dbg_kms(&dev_priv->drm,
		    "active: %s, output_types: %s (0x%x), output format: %s\n",
		    yesno(pipe_config->hw.active),
		    buf, pipe_config->output_types,
		    output_formats(pipe_config->output_format));

	drm_dbg_kms(&dev_priv->drm,
		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		    transcoder_name(pipe_config->cpu_transcoder),
		    pipe_config->pipe_bpp, pipe_config->dither);

	drm_dbg_kms(&dev_priv->drm,
		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
		    transcoder_name(pipe_config->master_transcoder),
		    pipe_config->sync_mode_slaves_mask);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* m2_n2 is the alternate link config used with DRRS */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		    pipe_config->has_audio, pipe_config->has_infoframe,
		    pipe_config->infoframes.enable);

	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
			    pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	/*
	 * NOTE(review): the gamut metadata packet also dumps infoframes.drm;
	 * presumably the DRM infoframe is carried in that packet on some
	 * platforms - confirm this is intentional and not a copy/paste slip.
	 */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);

	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
	drm_dbg_kms(&dev_priv->drm,
		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		    pipe_config->port_clock,
		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		    pipe_config->pixel_rate);

	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
		    pipe_config->linetime, pipe_config->ips_linetime);

	/* Pipe scalers only exist on gen9+ */
	if (INTEL_GEN(dev_priv) >= 9)
		drm_dbg_kms(&dev_priv->drm,
			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			    crtc->num_scalers,
			    pipe_config->scaler_state.scaler_users,
			    pipe_config->scaler_state.scaler_id);

	/* GMCH platforms have their own panel fitter state layout */
	if (HAS_GMCH(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			    pipe_config->gmch_pfit.control,
			    pipe_config->gmch_pfit.pgm_ratios,
			    pipe_config->gmch_pfit.lvds_border_bits);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
			    DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
			    enableddisabled(pipe_config->pch_pfit.enabled),
			    yesno(pipe_config->pch_pfit.force_thru));

	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
		    pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	/* CHV tracks cgm_mode where other platforms track csc_mode */
	if (IS_CHERRYVIEW(dev_priv))
		drm_dbg_kms(&dev_priv->drm,
			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->cgm_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);
	else
		drm_dbg_kms(&dev_priv->drm,
			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
			    pipe_config->csc_mode, pipe_config->gamma_mode,
			    pipe_config->gamma_enable, pipe_config->csc_enable);

	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
		    transcoder_name(pipe_config->mst_master_transcoder));

dump_planes:
	/* Plane states live in the atomic state; without one, stop here. */
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
13120
/*
 * Verify that each digital port is driven by at most one encoder in the
 * (partially updated) state, and that MST is never mixed with SST/HDMI
 * on the same port. Returns true when there are no conflicts.
 */
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * We're going to peek into connector->state,
	 * hence connection_mutex must be held.
	 */
	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the new state; fall back to the current state. */
		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		/* A bound encoder implies a bound crtc. */
		drm_WARN_ON(dev, !connector_state->crtc);

		switch (encoder->type) {
		case INTEL_OUTPUT_DDI:
			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
				break;
			/* else, fall through */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			/* the same port mustn't appear more than once */
			if (used_ports & BIT(encoder->port))
				ret = false;

			used_ports |= BIT(encoder->port);
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
13189
/*
 * Sync the parts of the hw crtc state that may change without a full
 * modeset (currently only the color management blobs) from the uapi
 * state.
 */
static void
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_crtc_state *crtc_state)
{
	intel_crtc_copy_color_blobs(crtc_state);
}
13195
13196 static void
13197 intel_crtc_copy_uapi_to_hw_state(struct intel_crtc_state *crtc_state)
13198 {
13199         crtc_state->hw.enable = crtc_state->uapi.enable;
13200         crtc_state->hw.active = crtc_state->uapi.active;
13201         crtc_state->hw.mode = crtc_state->uapi.mode;
13202         crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
13203         intel_crtc_copy_uapi_to_hw_state_nomodeset(crtc_state);
13204 }
13205
13206 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
13207 {
13208         crtc_state->uapi.enable = crtc_state->hw.enable;
13209         crtc_state->uapi.active = crtc_state->hw.active;
13210         drm_WARN_ON(crtc_state->uapi.crtc->dev,
13211                     drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
13212
13213         crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
13214
13215         /* copy color blobs to uapi */
13216         drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
13217                                   crtc_state->hw.degamma_lut);
13218         drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
13219                                   crtc_state->hw.gamma_lut);
13220         drm_property_replace_blob(&crtc_state->uapi.ctm,
13221                                   crtc_state->hw.ctm);
13222 }
13223
/*
 * Replace @crtc_state with a freshly allocated, mostly-zeroed state,
 * preserving only the uapi state and the few fields listed below that
 * must survive a config recomputation, then resync hw from uapi.
 *
 * Returns 0 on success or -ENOMEM if the temporary state allocation
 * fails (in which case @crtc_state is left untouched).
 */
static int
intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *saved_state;

	saved_state = intel_crtc_state_alloc(crtc);
	if (!saved_state)
		return -ENOMEM;

	/* free the old crtc_state->hw members */
	intel_crtc_free_hw_state(crtc_state);

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->uapi = crtc_state->uapi;
	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
	       sizeof(saved_state->icl_port_dplls));
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* Watermark state is only carried over on G4X/VLV/CHV. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	/* Clobber the old state wholesale with the cleaned-up copy. */
	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
	kfree(saved_state);

	intel_crtc_copy_uapi_to_hw_state(crtc_state);

	return 0;
}
13261
/*
 * Compute the full pipe configuration for a modeset on @pipe_config's
 * crtc: sanitize sync polarity flags, compute the baseline pipe bpp,
 * collect the output types, run the encoder ->compute_config() hooks
 * and the crtc fixup (retrying once if the crtc requests RETRY), and
 * finally derive the dither setting.
 *
 * Returns 0 on success, -EDEADLK if locking must be backed off and
 * retried, or another negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->uapi.crtc;
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret, i;
	bool retry = true;

	/* Start with the transcoder matching the pipe. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->hw.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	/* Remember the pre-clamp bpp for the debug message below. */
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->hw.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			drm_dbg_kms(&i915->drm,
				    "rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(connector_state->best_encoder);

		if (connector_state->crtc != crtc)
			continue;

		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* don't log -EDEADLK: it's a normal locking back-off */
			if (ret != -EDEADLK)
				drm_dbg_kms(&i915->drm,
					    "Encoder config failure: %d\n",
					    ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
		return ret;
	}

	/* RETRY grants exactly one more pass through the encoder hooks. */
	if (ret == RETRY) {
		if (drm_WARN(&i915->drm, !retry,
			     "loop in pipe configuration computation\n"))
			return -EINVAL;

		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	drm_dbg_kms(&i915->drm,
		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	/*
	 * Make drm_calc_timestamping_constants in
	 * drm_atomic_helper_update_legacy_modeset_state() happy
	 */
	pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;

	return 0;
}
13406
13407 static int
13408 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
13409 {
13410         struct intel_atomic_state *state =
13411                 to_intel_atomic_state(crtc_state->uapi.state);
13412         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
13413         struct drm_connector_state *conn_state;
13414         struct drm_connector *connector;
13415         int i;
13416
13417         for_each_new_connector_in_state(&state->base, connector,
13418                                         conn_state, i) {
13419                 struct intel_encoder *encoder =
13420                         to_intel_encoder(conn_state->best_encoder);
13421                 int ret;
13422
13423                 if (conn_state->crtc != &crtc->base ||
13424                     !encoder->compute_config_late)
13425                         continue;
13426
13427                 ret = encoder->compute_config_late(encoder, crtc_state,
13428                                                    conn_state);
13429                 if (ret)
13430                         return ret;
13431         }
13432
13433         return 0;
13434 }
13435
/*
 * Fuzzy comparison of two clocks (in kHz): they match when their
 * difference is below ~5% of their sum. A zero clock matches nothing
 * except another zero clock.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta;

	if (clock1 == clock2)
		return true;

	if (!clock1 || !clock2)
		return false;

	delta = abs(clock1 - clock2);

	return ((delta + clock1 + clock2) * 100) / (clock1 + clock2) < 105;
}
13453
13454 static bool
13455 intel_compare_m_n(unsigned int m, unsigned int n,
13456                   unsigned int m2, unsigned int n2,
13457                   bool exact)
13458 {
13459         if (m == m2 && n == n2)
13460                 return true;
13461
13462         if (exact || !m || !n || !m2 || !n2)
13463                 return false;
13464
13465         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
13466
13467         if (n > n2) {
13468                 while (n > n2) {
13469                         m2 <<= 1;
13470                         n2 <<= 1;
13471                 }
13472         } else if (n < n2) {
13473                 while (n < n2) {
13474                         m <<= 1;
13475                         n <<= 1;
13476                 }
13477         }
13478
13479         if (n != n2)
13480                 return false;
13481
13482         return intel_fuzzy_clock_check(m, m2);
13483 }
13484
13485 static bool
13486 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
13487                        const struct intel_link_m_n *m2_n2,
13488                        bool exact)
13489 {
13490         return m_n->tu == m2_n2->tu &&
13491                 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
13492                                   m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
13493                 intel_compare_m_n(m_n->link_m, m_n->link_n,
13494                                   m2_n2->link_m, m2_n2->link_n, exact);
13495 }
13496
13497 static bool
13498 intel_compare_infoframe(const union hdmi_infoframe *a,
13499                         const union hdmi_infoframe *b)
13500 {
13501         return memcmp(a, b, sizeof(*a)) == 0;
13502 }
13503
13504 static bool
13505 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
13506                          const struct drm_dp_vsc_sdp *b)
13507 {
13508         return memcmp(a, b, sizeof(*a)) == 0;
13509 }
13510
13511 static void
13512 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
13513                                bool fastset, const char *name,
13514                                const union hdmi_infoframe *a,
13515                                const union hdmi_infoframe *b)
13516 {
13517         if (fastset) {
13518                 if (!drm_debug_enabled(DRM_UT_KMS))
13519                         return;
13520
13521                 drm_dbg_kms(&dev_priv->drm,
13522                             "fastset mismatch in %s infoframe\n", name);
13523                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
13524                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
13525                 drm_dbg_kms(&dev_priv->drm, "found:\n");
13526                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
13527         } else {
13528                 drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
13529                 drm_err(&dev_priv->drm, "expected:\n");
13530                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
13531                 drm_err(&dev_priv->drm, "found:\n");
13532                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
13533         }
13534 }
13535
13536 static void
13537 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
13538                                 bool fastset, const char *name,
13539                                 const struct drm_dp_vsc_sdp *a,
13540                                 const struct drm_dp_vsc_sdp *b)
13541 {
13542         if (fastset) {
13543                 if (!drm_debug_enabled(DRM_UT_KMS))
13544                         return;
13545
13546                 drm_dbg_kms(&dev_priv->drm,
13547                             "fastset mismatch in %s dp sdp\n", name);
13548                 drm_dbg_kms(&dev_priv->drm, "expected:\n");
13549                 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
13550                 drm_dbg_kms(&dev_priv->drm, "found:\n");
13551                 drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
13552         } else {
13553                 drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
13554                 drm_err(&dev_priv->drm, "expected:\n");
13555                 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
13556                 drm_err(&dev_priv->drm, "found:\n");
13557                 drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
13558         }
13559 }
13560
13561 static void __printf(4, 5)
13562 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
13563                      const char *name, const char *format, ...)
13564 {
13565         struct drm_i915_private *i915 = to_i915(crtc->base.dev);
13566         struct va_format vaf;
13567         va_list args;
13568
13569         va_start(args, format);
13570         vaf.fmt = format;
13571         vaf.va = &args;
13572
13573         if (fastset)
13574                 drm_dbg_kms(&i915->drm,
13575                             "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
13576                             crtc->base.base.id, crtc->base.name, name, &vaf);
13577         else
13578                 drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
13579                         crtc->base.base.id, crtc->base.name, name, &vaf);
13580
13581         va_end(args);
13582 }
13583
13584 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
13585 {
13586         if (dev_priv->params.fastboot != -1)
13587                 return dev_priv->params.fastboot;
13588
13589         /* Enable fastboot by default on Skylake and newer */
13590         if (INTEL_GEN(dev_priv) >= 9)
13591                 return true;
13592
13593         /* Enable fastboot by default on VLV and CHV */
13594         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13595                 return true;
13596
13597         /* Disabled by default on all others */
13598         return false;
13599 }
13600
13601 static bool
13602 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
13603                           const struct intel_crtc_state *pipe_config,
13604                           bool fastset)
13605 {
13606         struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
13607         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
13608         bool ret = true;
13609         u32 bp_gamma = 0;
13610         bool fixup_inherited = fastset &&
13611                 current_config->inherited && !pipe_config->inherited;
13612
13613         if (fixup_inherited && !fastboot_enabled(dev_priv)) {
13614                 drm_dbg_kms(&dev_priv->drm,
13615                             "initial modeset and fastboot not set\n");
13616                 ret = false;
13617         }
13618
13619 #define PIPE_CONF_CHECK_X(name) do { \
13620         if (current_config->name != pipe_config->name) { \
13621                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13622                                      "(expected 0x%08x, found 0x%08x)", \
13623                                      current_config->name, \
13624                                      pipe_config->name); \
13625                 ret = false; \
13626         } \
13627 } while (0)
13628
13629 #define PIPE_CONF_CHECK_I(name) do { \
13630         if (current_config->name != pipe_config->name) { \
13631                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13632                                      "(expected %i, found %i)", \
13633                                      current_config->name, \
13634                                      pipe_config->name); \
13635                 ret = false; \
13636         } \
13637 } while (0)
13638
13639 #define PIPE_CONF_CHECK_BOOL(name) do { \
13640         if (current_config->name != pipe_config->name) { \
13641                 pipe_config_mismatch(fastset, crtc,  __stringify(name), \
13642                                      "(expected %s, found %s)", \
13643                                      yesno(current_config->name), \
13644                                      yesno(pipe_config->name)); \
13645                 ret = false; \
13646         } \
13647 } while (0)
13648
13649 /*
13650  * Checks state where we only read out the enabling, but not the entire
13651  * state itself (like full infoframes or ELD for audio). These states
13652  * require a full modeset on bootup to fix up.
13653  */
13654 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
13655         if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
13656                 PIPE_CONF_CHECK_BOOL(name); \
13657         } else { \
13658                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13659                                      "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
13660                                      yesno(current_config->name), \
13661                                      yesno(pipe_config->name)); \
13662                 ret = false; \
13663         } \
13664 } while (0)
13665
13666 #define PIPE_CONF_CHECK_P(name) do { \
13667         if (current_config->name != pipe_config->name) { \
13668                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13669                                      "(expected %p, found %p)", \
13670                                      current_config->name, \
13671                                      pipe_config->name); \
13672                 ret = false; \
13673         } \
13674 } while (0)
13675
13676 #define PIPE_CONF_CHECK_M_N(name) do { \
13677         if (!intel_compare_link_m_n(&current_config->name, \
13678                                     &pipe_config->name,\
13679                                     !fastset)) { \
13680                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13681                                      "(expected tu %i gmch %i/%i link %i/%i, " \
13682                                      "found tu %i, gmch %i/%i link %i/%i)", \
13683                                      current_config->name.tu, \
13684                                      current_config->name.gmch_m, \
13685                                      current_config->name.gmch_n, \
13686                                      current_config->name.link_m, \
13687                                      current_config->name.link_n, \
13688                                      pipe_config->name.tu, \
13689                                      pipe_config->name.gmch_m, \
13690                                      pipe_config->name.gmch_n, \
13691                                      pipe_config->name.link_m, \
13692                                      pipe_config->name.link_n); \
13693                 ret = false; \
13694         } \
13695 } while (0)
13696
13697 /* This is required for BDW+ where there is only one set of registers for
13698  * switching between high and low RR.
13699  * This macro can be used whenever a comparison has to be made between one
13700  * hw state and multiple sw state variables.
13701  */
13702 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
13703         if (!intel_compare_link_m_n(&current_config->name, \
13704                                     &pipe_config->name, !fastset) && \
13705             !intel_compare_link_m_n(&current_config->alt_name, \
13706                                     &pipe_config->name, !fastset)) { \
13707                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13708                                      "(expected tu %i gmch %i/%i link %i/%i, " \
13709                                      "or tu %i gmch %i/%i link %i/%i, " \
13710                                      "found tu %i, gmch %i/%i link %i/%i)", \
13711                                      current_config->name.tu, \
13712                                      current_config->name.gmch_m, \
13713                                      current_config->name.gmch_n, \
13714                                      current_config->name.link_m, \
13715                                      current_config->name.link_n, \
13716                                      current_config->alt_name.tu, \
13717                                      current_config->alt_name.gmch_m, \
13718                                      current_config->alt_name.gmch_n, \
13719                                      current_config->alt_name.link_m, \
13720                                      current_config->alt_name.link_n, \
13721                                      pipe_config->name.tu, \
13722                                      pipe_config->name.gmch_m, \
13723                                      pipe_config->name.gmch_n, \
13724                                      pipe_config->name.link_m, \
13725                                      pipe_config->name.link_n); \
13726                 ret = false; \
13727         } \
13728 } while (0)
13729
13730 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
13731         if ((current_config->name ^ pipe_config->name) & (mask)) { \
13732                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13733                                      "(%x) (expected %i, found %i)", \
13734                                      (mask), \
13735                                      current_config->name & (mask), \
13736                                      pipe_config->name & (mask)); \
13737                 ret = false; \
13738         } \
13739 } while (0)
13740
13741 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
13742         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
13743                 pipe_config_mismatch(fastset, crtc, __stringify(name), \
13744                                      "(expected %i, found %i)", \
13745                                      current_config->name, \
13746                                      pipe_config->name); \
13747                 ret = false; \
13748         } \
13749 } while (0)
13750
13751 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
13752         if (!intel_compare_infoframe(&current_config->infoframes.name, \
13753                                      &pipe_config->infoframes.name)) { \
13754                 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
13755                                                &current_config->infoframes.name, \
13756                                                &pipe_config->infoframes.name); \
13757                 ret = false; \
13758         } \
13759 } while (0)
13760
13761 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
13762         if (!current_config->has_psr && !pipe_config->has_psr && \
13763             !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
13764                                       &pipe_config->infoframes.name)) { \
13765                 pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
13766                                                 &current_config->infoframes.name, \
13767                                                 &pipe_config->infoframes.name); \
13768                 ret = false; \
13769         } \
13770 } while (0)
13771
13772 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
13773         if (current_config->name1 != pipe_config->name1) { \
13774                 pipe_config_mismatch(fastset, crtc, __stringify(name1), \
13775                                 "(expected %i, found %i, won't compare lut values)", \
13776                                 current_config->name1, \
13777                                 pipe_config->name1); \
13778                 ret = false;\
13779         } else { \
13780                 if (!intel_color_lut_equal(current_config->name2, \
13781                                         pipe_config->name2, pipe_config->name1, \
13782                                         bit_precision)) { \
13783                         pipe_config_mismatch(fastset, crtc, __stringify(name2), \
13784                                         "hw_state doesn't match sw_state"); \
13785                         ret = false; \
13786                 } \
13787         } \
13788 } while (0)
13789
13790 #define PIPE_CONF_QUIRK(quirk) \
13791         ((current_config->quirks | pipe_config->quirks) & (quirk))
13792
13793         PIPE_CONF_CHECK_I(cpu_transcoder);
13794
13795         PIPE_CONF_CHECK_BOOL(has_pch_encoder);
13796         PIPE_CONF_CHECK_I(fdi_lanes);
13797         PIPE_CONF_CHECK_M_N(fdi_m_n);
13798
13799         PIPE_CONF_CHECK_I(lane_count);
13800         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
13801
13802         if (INTEL_GEN(dev_priv) < 8) {
13803                 PIPE_CONF_CHECK_M_N(dp_m_n);
13804
13805                 if (current_config->has_drrs)
13806                         PIPE_CONF_CHECK_M_N(dp_m2_n2);
13807         } else
13808                 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
13809
13810         PIPE_CONF_CHECK_X(output_types);
13811
13812         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
13813         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
13814         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
13815         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
13816         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
13817         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
13818
13819         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
13820         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
13821         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
13822         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
13823         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
13824         PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
13825
13826         PIPE_CONF_CHECK_I(pixel_multiplier);
13827         PIPE_CONF_CHECK_I(output_format);
13828         PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
13829         if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
13830             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
13831                 PIPE_CONF_CHECK_BOOL(limited_color_range);
13832
13833         PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
13834         PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
13835         PIPE_CONF_CHECK_BOOL(has_infoframe);
13836         PIPE_CONF_CHECK_BOOL(fec_enable);
13837
13838         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
13839
13840         PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13841                               DRM_MODE_FLAG_INTERLACE);
13842
13843         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
13844                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13845                                       DRM_MODE_FLAG_PHSYNC);
13846                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13847                                       DRM_MODE_FLAG_NHSYNC);
13848                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13849                                       DRM_MODE_FLAG_PVSYNC);
13850                 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
13851                                       DRM_MODE_FLAG_NVSYNC);
13852         }
13853
13854         PIPE_CONF_CHECK_X(gmch_pfit.control);
13855         /* pfit ratios are autocomputed by the hw on gen4+ */
13856         if (INTEL_GEN(dev_priv) < 4)
13857                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
13858         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
13859
13860         /*
13861          * Changing the EDP transcoder input mux
13862          * (A_ONOFF vs. A_ON) requires a full modeset.
13863          */
13864         PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
13865
13866         if (!fastset) {
13867                 PIPE_CONF_CHECK_I(pipe_src_w);
13868                 PIPE_CONF_CHECK_I(pipe_src_h);
13869
13870                 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
13871                 if (current_config->pch_pfit.enabled) {
13872                         PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
13873                         PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
13874                         PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
13875                         PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
13876                 }
13877
13878                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
13879                 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
13880
13881                 PIPE_CONF_CHECK_X(gamma_mode);
13882                 if (IS_CHERRYVIEW(dev_priv))
13883                         PIPE_CONF_CHECK_X(cgm_mode);
13884                 else
13885                         PIPE_CONF_CHECK_X(csc_mode);
13886                 PIPE_CONF_CHECK_BOOL(gamma_enable);
13887                 PIPE_CONF_CHECK_BOOL(csc_enable);
13888
13889                 PIPE_CONF_CHECK_I(linetime);
13890                 PIPE_CONF_CHECK_I(ips_linetime);
13891
13892                 bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
13893                 if (bp_gamma)
13894                         PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
13895         }
13896
13897         PIPE_CONF_CHECK_BOOL(double_wide);
13898
13899         PIPE_CONF_CHECK_P(shared_dpll);
13900         PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
13901         PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
13902         PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
13903         PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
13904         PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
13905         PIPE_CONF_CHECK_X(dpll_hw_state.spll);
13906         PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
13907         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
13908         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
13909         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
13910         PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
13911         PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
13912         PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
13913         PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
13914         PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
13915         PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
13916         PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
13917         PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
13918         PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
13919         PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
13920         PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
13921         PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
13922         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
13923         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
13924         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
13925         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
13926         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
13927         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
13928         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
13929         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
13930         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
13931
13932         PIPE_CONF_CHECK_X(dsi_pll.ctrl);
13933         PIPE_CONF_CHECK_X(dsi_pll.div);
13934
13935         if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
13936                 PIPE_CONF_CHECK_I(pipe_bpp);
13937
13938         PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
13939         PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
13940
13941         PIPE_CONF_CHECK_I(min_voltage_level);
13942
13943         PIPE_CONF_CHECK_X(infoframes.enable);
13944         PIPE_CONF_CHECK_X(infoframes.gcp);
13945         PIPE_CONF_CHECK_INFOFRAME(avi);
13946         PIPE_CONF_CHECK_INFOFRAME(spd);
13947         PIPE_CONF_CHECK_INFOFRAME(hdmi);
13948         PIPE_CONF_CHECK_INFOFRAME(drm);
13949         PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
13950
13951         PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
13952         PIPE_CONF_CHECK_I(master_transcoder);
13953
13954         PIPE_CONF_CHECK_I(dsc.compression_enable);
13955         PIPE_CONF_CHECK_I(dsc.dsc_split);
13956         PIPE_CONF_CHECK_I(dsc.compressed_bpp);
13957
13958         PIPE_CONF_CHECK_I(mst_master_transcoder);
13959
13960 #undef PIPE_CONF_CHECK_X
13961 #undef PIPE_CONF_CHECK_I
13962 #undef PIPE_CONF_CHECK_BOOL
13963 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
13964 #undef PIPE_CONF_CHECK_P
13965 #undef PIPE_CONF_CHECK_FLAGS
13966 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
13967 #undef PIPE_CONF_CHECK_COLOR_LUT
13968 #undef PIPE_CONF_QUIRK
13969
13970         return ret;
13971 }
13972
13973 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
13974                                            const struct intel_crtc_state *pipe_config)
13975 {
13976         if (pipe_config->has_pch_encoder) {
13977                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
13978                                                             &pipe_config->fdi_m_n);
13979                 int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
13980
13981                 /*
13982                  * FDI already provided one idea for the dotclock.
13983                  * Yell if the encoder disagrees.
13984                  */
13985                 drm_WARN(&dev_priv->drm,
13986                          !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
13987                          "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
13988                          fdi_dotclock, dotclock);
13989         }
13990 }
13991
/*
 * Verify that the watermark and DDB state read back from the hardware
 * matches the software state computed for @crtc (gen9+ only). Any
 * disagreement is logged with drm_err(); nothing is corrected here.
 */
static void verify_wm_state(struct intel_crtc *crtc,
                            struct intel_crtc_state *new_crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        /* Scratch copy of the hw state; heap-allocated as it is large. */
        struct skl_hw_state {
                struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
                struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
                struct skl_pipe_wm wm;
        } *hw;
        struct skl_pipe_wm *sw_wm;
        struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
        u8 hw_enabled_slices;
        const enum pipe pipe = crtc->pipe;
        int plane, level, max_level = ilk_wm_max_level(dev_priv);

        /* SKL-style watermarks only; nothing to verify on an inactive pipe. */
        if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
                return;

        hw = kzalloc(sizeof(*hw), GFP_KERNEL);
        if (!hw)
                return;

        /* Read back what the hardware actually has programmed. */
        skl_pipe_wm_get_hw_state(crtc, &hw->wm);
        sw_wm = &new_crtc_state->wm.skl.optimal;

        skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

        hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);

        /* DBUF slice mask is only tracked/compared on gen11+. */
        if (INTEL_GEN(dev_priv) >= 11 &&
            hw_enabled_slices != dev_priv->dbuf.enabled_slices)
                drm_err(&dev_priv->drm,
                        "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
                        dev_priv->dbuf.enabled_slices,
                        hw_enabled_slices);

        /* planes */
        for_each_universal_plane(dev_priv, pipe, plane) {
                struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

                hw_plane_wm = &hw->wm.planes[plane];
                sw_plane_wm = &sw_wm->planes[plane];

                /* Watermarks */
                for (level = 0; level <= max_level; level++) {
                        /* Level 0 may alternatively match the SAGV WM0 value. */
                        if (skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                &sw_plane_wm->wm[level]) ||
                            (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                               &sw_plane_wm->sagv_wm0)))
                                continue;

                        drm_err(&dev_priv->drm,
                                "mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                pipe_name(pipe), plane + 1, level,
                                sw_plane_wm->wm[level].plane_en,
                                sw_plane_wm->wm[level].plane_res_b,
                                sw_plane_wm->wm[level].plane_res_l,
                                hw_plane_wm->wm[level].plane_en,
                                hw_plane_wm->wm[level].plane_res_b,
                                hw_plane_wm->wm[level].plane_res_l);
                }

                /* Transition watermark must match exactly. */
                if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
                                         &sw_plane_wm->trans_wm)) {
                        drm_err(&dev_priv->drm,
                                "mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                pipe_name(pipe), plane + 1,
                                sw_plane_wm->trans_wm.plane_en,
                                sw_plane_wm->trans_wm.plane_res_b,
                                sw_plane_wm->trans_wm.plane_res_l,
                                hw_plane_wm->trans_wm.plane_en,
                                hw_plane_wm->trans_wm.plane_res_b,
                                hw_plane_wm->trans_wm.plane_res_l);
                }

                /* DDB */
                hw_ddb_entry = &hw->ddb_y[plane];
                sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];

                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        drm_err(&dev_priv->drm,
                                "mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
                                pipe_name(pipe), plane + 1,
                                sw_ddb_entry->start, sw_ddb_entry->end,
                                hw_ddb_entry->start, hw_ddb_entry->end);
                }
        }

        /*
         * cursor
         * If the cursor plane isn't active, we may not have updated it's ddb
         * allocation. In that case since the ddb allocation will be updated
         * once the plane becomes visible, we can skip this check
         */
        /*
         * NOTE(review): despite the comment above, the cursor check is
         * unconditional (if (1)) — presumably a leftover from an earlier
         * visibility check; confirm before relying on the comment.
         */
        if (1) {
                struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

                hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
                sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

                /* Watermarks */
                for (level = 0; level <= max_level; level++) {
                        /* Level 0 may alternatively match the SAGV WM0 value. */
                        if (skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                &sw_plane_wm->wm[level]) ||
                            (level == 0 && skl_wm_level_equals(&hw_plane_wm->wm[level],
                                                               &sw_plane_wm->sagv_wm0)))
                                continue;

                        drm_err(&dev_priv->drm,
                                "mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                pipe_name(pipe), level,
                                sw_plane_wm->wm[level].plane_en,
                                sw_plane_wm->wm[level].plane_res_b,
                                sw_plane_wm->wm[level].plane_res_l,
                                hw_plane_wm->wm[level].plane_en,
                                hw_plane_wm->wm[level].plane_res_b,
                                hw_plane_wm->wm[level].plane_res_l);
                }

                if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
                                         &sw_plane_wm->trans_wm)) {
                        drm_err(&dev_priv->drm,
                                "mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
                                pipe_name(pipe),
                                sw_plane_wm->trans_wm.plane_en,
                                sw_plane_wm->trans_wm.plane_res_b,
                                sw_plane_wm->trans_wm.plane_res_l,
                                hw_plane_wm->trans_wm.plane_en,
                                hw_plane_wm->trans_wm.plane_res_b,
                                hw_plane_wm->trans_wm.plane_res_l);
                }

                /* DDB */
                hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
                sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];

                if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
                        drm_err(&dev_priv->drm,
                                "mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
                                pipe_name(pipe),
                                sw_ddb_entry->start, sw_ddb_entry->end,
                                hw_ddb_entry->start, hw_ddb_entry->end);
                }
        }

        kfree(hw);
}
14139
14140 static void
14141 verify_connector_state(struct intel_atomic_state *state,
14142                        struct intel_crtc *crtc)
14143 {
14144         struct drm_connector *connector;
14145         struct drm_connector_state *new_conn_state;
14146         int i;
14147
14148         for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
14149                 struct drm_encoder *encoder = connector->encoder;
14150                 struct intel_crtc_state *crtc_state = NULL;
14151
14152                 if (new_conn_state->crtc != &crtc->base)
14153                         continue;
14154
14155                 if (crtc)
14156                         crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
14157
14158                 intel_connector_verify_state(crtc_state, new_conn_state);
14159
14160                 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
14161                      "connector's atomic encoder doesn't match legacy encoder\n");
14162         }
14163 }
14164
14165 static void
14166 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
14167 {
14168         struct intel_encoder *encoder;
14169         struct drm_connector *connector;
14170         struct drm_connector_state *old_conn_state, *new_conn_state;
14171         int i;
14172
14173         for_each_intel_encoder(&dev_priv->drm, encoder) {
14174                 bool enabled = false, found = false;
14175                 enum pipe pipe;
14176
14177                 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
14178                             encoder->base.base.id,
14179                             encoder->base.name);
14180
14181                 for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
14182                                                    new_conn_state, i) {
14183                         if (old_conn_state->best_encoder == &encoder->base)
14184                                 found = true;
14185
14186                         if (new_conn_state->best_encoder != &encoder->base)
14187                                 continue;
14188                         found = enabled = true;
14189
14190                         I915_STATE_WARN(new_conn_state->crtc !=
14191                                         encoder->base.crtc,
14192                              "connector's crtc doesn't match encoder crtc\n");
14193                 }
14194
14195                 if (!found)
14196                         continue;
14197
14198                 I915_STATE_WARN(!!encoder->base.crtc != enabled,
14199                      "encoder's enabled state mismatch "
14200                      "(expected %i, found %i)\n",
14201                      !!encoder->base.crtc, enabled);
14202
14203                 if (!encoder->base.crtc) {
14204                         bool active;
14205
14206                         active = encoder->get_hw_state(encoder, &pipe);
14207                         I915_STATE_WARN(active,
14208                              "encoder detached but still enabled on pipe %c.\n",
14209                              pipe_name(pipe));
14210                 }
14211         }
14212 }
14213
/*
 * Cross-check a crtc's software state against the hardware after a commit.
 *
 * NOTE: @old_crtc_state is consumed here: it is destroyed and reused as
 * scratch space to hold the state read back from the hardware, which is
 * then compared against @new_crtc_state.
 */
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	/* alias making it explicit that old_crtc_state now holds hw readout */
	struct intel_crtc_state *pipe_config = old_crtc_state;
	struct drm_atomic_state *state = old_crtc_state->uapi.state;
	bool active;

	/*
	 * Free and reset the old state so it can receive the hardware
	 * readout; preserve the uapi.state backpointer, which
	 * intel_crtc_state_reset() would otherwise wipe.
	 */
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
	intel_crtc_free_hw_state(old_crtc_state);
	intel_crtc_state_reset(old_crtc_state, crtc);
	old_crtc_state->uapi.state = state;

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
		    crtc->base.name);

	active = dev_priv->display.get_pipe_config(crtc, pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		active = new_crtc_state->hw.active;

	I915_STATE_WARN(new_crtc_state->hw.active != active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, active);

	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n",
			new_crtc_state->hw.active, crtc->active);

	/* Every encoder on the crtc must agree on active state and pipe. */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->hw.active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active,
				new_crtc_state->hw.active);

		I915_STATE_WARN(active && crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Let the active encoder fill in its part of the readout. */
		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	/* Full state comparison only makes sense for active crtcs. */
	if (!new_crtc_state->hw.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}
14281
14282 static void
14283 intel_verify_planes(struct intel_atomic_state *state)
14284 {
14285         struct intel_plane *plane;
14286         const struct intel_plane_state *plane_state;
14287         int i;
14288
14289         for_each_new_intel_plane_in_state(state, plane,
14290                                           plane_state, i)
14291                 assert_plane(plane, plane_state->planar_slave ||
14292                              plane_state->uapi.visible);
14293 }
14294
/*
 * Cross-check one shared DPLL's software tracking against the hardware.
 *
 * When @crtc/@new_crtc_state are NULL only the pll's global bookkeeping
 * (on vs. active_mask vs. crtc_mask) is verified; otherwise the crtc's
 * membership in the pll's masks and the saved hw state are checked too.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned int crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);

	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on plls may legitimately be enabled with no users. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	if (!crtc) {
		/* No crtc supplied: only check global mask consistency. */
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = drm_crtc_mask(&crtc->base);

	/* Active crtcs must be in the pll's active mask, inactive ones not. */
	if (new_crtc_state->hw.active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(crtc->pipe), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(crtc->pipe), pll->active_mask);

	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	/* While the pll is on, its saved sw hw_state must match the hw. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
14349
14350 static void
14351 verify_shared_dpll_state(struct intel_crtc *crtc,
14352                          struct intel_crtc_state *old_crtc_state,
14353                          struct intel_crtc_state *new_crtc_state)
14354 {
14355         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14356
14357         if (new_crtc_state->shared_dpll)
14358                 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
14359
14360         if (old_crtc_state->shared_dpll &&
14361             old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
14362                 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
14363                 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
14364
14365                 I915_STATE_WARN(pll->active_mask & crtc_mask,
14366                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
14367                                 pipe_name(crtc->pipe));
14368                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
14369                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
14370                                 pipe_name(crtc->pipe));
14371         }
14372 }
14373
14374 static void
14375 intel_modeset_verify_crtc(struct intel_crtc *crtc,
14376                           struct intel_atomic_state *state,
14377                           struct intel_crtc_state *old_crtc_state,
14378                           struct intel_crtc_state *new_crtc_state)
14379 {
14380         if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
14381                 return;
14382
14383         verify_wm_state(crtc, new_crtc_state);
14384         verify_connector_state(state, crtc);
14385         verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
14386         verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
14387 }
14388
14389 static void
14390 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
14391 {
14392         int i;
14393
14394         for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
14395                 verify_single_dpll_state(dev_priv,
14396                                          &dev_priv->dpll.shared_dplls[i],
14397                                          NULL, NULL);
14398 }
14399
14400 static void
14401 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
14402                               struct intel_atomic_state *state)
14403 {
14404         verify_encoder_state(dev_priv, state);
14405         verify_connector_state(state, NULL);
14406         verify_disabled_dpll_state(dev_priv);
14407 }
14408
/*
 * Update the crtc's vblank timestamping constants and per-platform
 * scanline counter offset from the adjusted mode, for use by the vblank
 * machinery. Must be called whenever the active timings change.
 */
static void
intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;

	drm_calc_timestamping_constants(&crtc->base, adjusted_mode);

	crtc->mode_flags = crtc_state->mode_flags;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (IS_GEN(dev_priv, 2)) {
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		/* interlaced modes count in half-lines per field */
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else {
		crtc->scanline_offset = 1;
	}
}
14463
14464 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
14465 {
14466         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14467         struct intel_crtc_state *new_crtc_state;
14468         struct intel_crtc *crtc;
14469         int i;
14470
14471         if (!dev_priv->display.crtc_compute_clock)
14472                 return;
14473
14474         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14475                 if (!needs_modeset(new_crtc_state))
14476                         continue;
14477
14478                 intel_release_shared_dplls(state, crtc);
14479         }
14480 }
14481
14482 /*
14483  * This implements the workaround described in the "notes" section of the mode
14484  * set sequence documentation. When going from no pipes or single pipe to
14485  * multiple pipes, and planes are enabled after the pipe, we need to wait at
14486  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
14487  */
static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->hw.active ||
		    !needs_modeset(crtc_state))
			continue;

		/* Remember the first two crtcs being enabled by this modeset. */
		if (first_crtc_state) {
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		/* NOTE: pulls every crtc into the state (and locks it). */
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		if (!crtc_state->hw.active ||
		    needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/*
	 * Wait on the already-enabled pipe if there is exactly one;
	 * otherwise the second newly-enabled pipe waits on the first.
	 */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
14542
14543 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
14544                            u8 active_pipes)
14545 {
14546         const struct intel_crtc_state *crtc_state;
14547         struct intel_crtc *crtc;
14548         int i;
14549
14550         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14551                 if (crtc_state->hw.active)
14552                         active_pipes |= BIT(crtc->pipe);
14553                 else
14554                         active_pipes &= ~BIT(crtc->pipe);
14555         }
14556
14557         return active_pipes;
14558 }
14559
14560 static int intel_modeset_checks(struct intel_atomic_state *state)
14561 {
14562         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14563         int ret;
14564
14565         state->modeset = true;
14566         state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes);
14567
14568         if (state->active_pipes != dev_priv->active_pipes) {
14569                 ret = _intel_atomic_lock_global_state(state);
14570                 if (ret)
14571                         return ret;
14572         }
14573
14574         if (IS_HASWELL(dev_priv))
14575                 return hsw_mode_set_planes_workaround(state);
14576
14577         return 0;
14578 }
14579
14580 /*
14581  * Handle calculation of various watermark data at the end of the atomic check
14582  * phase.  The code here should be run after the per-crtc and per-plane 'check'
14583  * handlers to ensure that all derived state has been updated.
14584  */
14585 static int calc_watermark_data(struct intel_atomic_state *state)
14586 {
14587         struct drm_device *dev = state->base.dev;
14588         struct drm_i915_private *dev_priv = to_i915(dev);
14589
14590         /* Is there platform-specific watermark information to calculate? */
14591         if (dev_priv->display.compute_global_watermarks)
14592                 return dev_priv->display.compute_global_watermarks(state);
14593
14594         return 0;
14595 }
14596
14597 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
14598                                      struct intel_crtc_state *new_crtc_state)
14599 {
14600         if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
14601                 return;
14602
14603         new_crtc_state->uapi.mode_changed = false;
14604         new_crtc_state->update_pipe = true;
14605 }
14606
14607 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
14608                                     struct intel_crtc_state *new_crtc_state)
14609 {
14610         /*
14611          * If we're not doing the full modeset we want to
14612          * keep the current M/N values as they may be
14613          * sufficiently different to the computed values
14614          * to cause problems.
14615          *
14616          * FIXME: should really copy more fuzzy state here
14617          */
14618         new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
14619         new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
14620         new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
14621         new_crtc_state->has_drrs = old_crtc_state->has_drrs;
14622 }
14623
14624 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
14625                                           struct intel_crtc *crtc,
14626                                           u8 plane_ids_mask)
14627 {
14628         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
14629         struct intel_plane *plane;
14630
14631         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
14632                 struct intel_plane_state *plane_state;
14633
14634                 if ((plane_ids_mask & BIT(plane->id)) == 0)
14635                         continue;
14636
14637                 plane_state = intel_atomic_get_plane_state(state, plane);
14638                 if (IS_ERR(plane_state))
14639                         return PTR_ERR(plane_state);
14640         }
14641
14642         return 0;
14643 }
14644
14645 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
14646 {
14647         /* See {hsw,vlv,ivb}_plane_ratio() */
14648         return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
14649                 IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
14650                 IS_IVYBRIDGE(dev_priv) || (INTEL_GEN(dev_priv) >= 11);
14651 }
14652
/*
 * Per-plane atomic checks: link NV12 slave planes, run each plane's
 * check hook, and on platforms where the number of active planes feeds
 * into the minimum cdclk, pull any newly-relevant planes into the state.
 */
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	struct intel_crtc *crtc;
	int i, ret;

	ret = icl_add_linked_planes(state);
	if (ret)
		return ret;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_atomic_check(state, plane);
		if (ret) {
			drm_dbg_atomic(&dev_priv->drm,
				       "[PLANE:%d:%s] atomic driver check failed\n",
				       plane->base.base.id, plane->base.name);
			return ret;
		}
	}

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		u8 old_active_planes, new_active_planes;

		ret = icl_check_nv12_planes(new_crtc_state);
		if (ret)
			return ret;

		/*
		 * On some platforms the number of active planes affects
		 * the planes' minimum cdclk calculation. Add such planes
		 * to the state before we compute the minimum cdclk.
		 */
		if (!active_planes_affects_min_cdclk(dev_priv))
			continue;

		/* The cursor plane doesn't count towards the plane ratios. */
		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);

		/*
		 * Not only the number of planes, but if the plane configuration had
		 * changed might already mean we need to recompute min CDCLK,
		 * because different planes might consume different amount of Dbuf bandwidth
		 * according to formula: Bw per plane = Pixel rate * bpp * pipe/plane scale factor
		 */
		if (old_active_planes == new_active_planes)
			continue;

		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
		if (ret)
			return ret;
	}

	return 0;
}
14711
/*
 * Decide whether a full cdclk recomputation is needed for this state,
 * based on per-plane minimum cdclk, a forced minimum, and the bandwidth
 * code's minimum. Sets *@need_cdclk_calc accordingly (never clears it).
 */
static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
				    bool *need_cdclk_calc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_cdclk_state *new_cdclk_state;
	struct intel_plane_state *plane_state;
	struct intel_bw_state *new_bw_state;
	struct intel_plane *plane;
	int min_cdclk = 0;
	enum pipe pipe;
	int ret;
	int i;
	/*
	 * active_planes bitmask has been updated, and potentially
	 * affected planes are part of the state. We can now
	 * compute the minimum cdclk for each plane.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
		if (ret)
			return ret;
	}

	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);

	if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed)
		*need_cdclk_calc = true;

	ret = dev_priv->display.bw_calc_min_cdclk(state);
	if (ret)
		return ret;

	new_bw_state = intel_atomic_get_new_bw_state(state);

	/* Nothing more to compare without both cdclk and bw state present. */
	if (!new_cdclk_state || !new_bw_state)
		return 0;

	/* min_cdclk accumulates the max over all pipes as we iterate. */
	for_each_pipe(dev_priv, pipe) {
		min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);

		/*
		 * Currently do this change only if we need to increase
		 */
		if (new_bw_state->min_cdclk > min_cdclk)
			*need_cdclk_calc = true;
	}

	return 0;
}
14761
14762 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
14763 {
14764         struct intel_crtc_state *crtc_state;
14765         struct intel_crtc *crtc;
14766         int i;
14767
14768         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
14769                 int ret = intel_crtc_atomic_check(state, crtc);
14770                 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
14771                 if (ret) {
14772                         drm_dbg_atomic(&i915->drm,
14773                                        "[CRTC:%d:%s] atomic driver check failed\n",
14774                                        crtc->base.base.id, crtc->base.name);
14775                         return ret;
14776                 }
14777         }
14778
14779         return 0;
14780 }
14781
14782 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
14783                                                u8 transcoders)
14784 {
14785         const struct intel_crtc_state *new_crtc_state;
14786         struct intel_crtc *crtc;
14787         int i;
14788
14789         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14790                 if (new_crtc_state->hw.enable &&
14791                     transcoders & BIT(new_crtc_state->cpu_transcoder) &&
14792                     needs_modeset(new_crtc_state))
14793                         return true;
14794         }
14795
14796         return false;
14797 }
14798
14799 /**
14800  * intel_atomic_check - validate state object
14801  * @dev: drm device
14802  * @_state: state to validate
14803  */
14804 static int intel_atomic_check(struct drm_device *dev,
14805                               struct drm_atomic_state *_state)
14806 {
14807         struct drm_i915_private *dev_priv = to_i915(dev);
14808         struct intel_atomic_state *state = to_intel_atomic_state(_state);
14809         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
14810         struct intel_crtc *crtc;
14811         int ret, i;
14812         bool any_ms = false;
14813
14814         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14815                                             new_crtc_state, i) {
14816                 if (new_crtc_state->inherited != old_crtc_state->inherited)
14817                         new_crtc_state->uapi.mode_changed = true;
14818         }
14819
14820         ret = drm_atomic_helper_check_modeset(dev, &state->base);
14821         if (ret)
14822                 goto fail;
14823
14824         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14825                                             new_crtc_state, i) {
14826                 if (!needs_modeset(new_crtc_state)) {
14827                         /* Light copy */
14828                         intel_crtc_copy_uapi_to_hw_state_nomodeset(new_crtc_state);
14829
14830                         continue;
14831                 }
14832
14833                 ret = intel_crtc_prepare_cleared_state(new_crtc_state);
14834                 if (ret)
14835                         goto fail;
14836
14837                 if (!new_crtc_state->hw.enable)
14838                         continue;
14839
14840                 ret = intel_modeset_pipe_config(new_crtc_state);
14841                 if (ret)
14842                         goto fail;
14843         }
14844
14845         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14846                                             new_crtc_state, i) {
14847                 if (!needs_modeset(new_crtc_state))
14848                         continue;
14849
14850                 ret = intel_modeset_pipe_config_late(new_crtc_state);
14851                 if (ret)
14852                         goto fail;
14853
14854                 intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
14855         }
14856
14857         /**
14858          * Check if fastset is allowed by external dependencies like other
14859          * pipes and transcoders.
14860          *
14861          * Right now it only forces a fullmodeset when the MST master
14862          * transcoder did not changed but the pipe of the master transcoder
14863          * needs a fullmodeset so all slaves also needs to do a fullmodeset or
14864          * in case of port synced crtcs, if one of the synced crtcs
14865          * needs a full modeset, all other synced crtcs should be
14866          * forced a full modeset.
14867          */
14868         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
14869                 if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state))
14870                         continue;
14871
14872                 if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
14873                         enum transcoder master = new_crtc_state->mst_master_transcoder;
14874
14875                         if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
14876                                 new_crtc_state->uapi.mode_changed = true;
14877                                 new_crtc_state->update_pipe = false;
14878                         }
14879                 }
14880
14881                 if (is_trans_port_sync_mode(new_crtc_state)) {
14882                         u8 trans = new_crtc_state->sync_mode_slaves_mask;
14883
14884                         if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
14885                                 trans |= BIT(new_crtc_state->master_transcoder);
14886
14887                         if (intel_cpu_transcoders_need_modeset(state, trans)) {
14888                                 new_crtc_state->uapi.mode_changed = true;
14889                                 new_crtc_state->update_pipe = false;
14890                         }
14891                 }
14892         }
14893
14894         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14895                                             new_crtc_state, i) {
14896                 if (needs_modeset(new_crtc_state)) {
14897                         any_ms = true;
14898                         continue;
14899                 }
14900
14901                 if (!new_crtc_state->update_pipe)
14902                         continue;
14903
14904                 intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
14905         }
14906
14907         if (any_ms && !check_digital_port_conflicts(state)) {
14908                 drm_dbg_kms(&dev_priv->drm,
14909                             "rejecting conflicting digital port configuration\n");
14910                 ret = EINVAL;
14911                 goto fail;
14912         }
14913
14914         ret = drm_dp_mst_atomic_check(&state->base);
14915         if (ret)
14916                 goto fail;
14917
14918         ret = intel_atomic_check_planes(state);
14919         if (ret)
14920                 goto fail;
14921
14922         /*
14923          * distrust_bios_wm will force a full dbuf recomputation
14924          * but the hardware state will only get updated accordingly
14925          * if state->modeset==true. Hence distrust_bios_wm==true &&
14926          * state->modeset==false is an invalid combination which
14927          * would cause the hardware and software dbuf state to get
14928          * out of sync. We must prevent that.
14929          *
14930          * FIXME clean up this mess and introduce better
14931          * state tracking for dbuf.
14932          */
14933         if (dev_priv->wm.distrust_bios_wm)
14934                 any_ms = true;
14935
14936         if (any_ms) {
14937                 ret = intel_modeset_checks(state);
14938                 if (ret)
14939                         goto fail;
14940         }
14941
14942         intel_fbc_choose_crtc(dev_priv, state);
14943         ret = calc_watermark_data(state);
14944         if (ret)
14945                 goto fail;
14946
14947         ret = intel_bw_atomic_check(state);
14948         if (ret)
14949                 goto fail;
14950
14951         ret = intel_atomic_check_cdclk(state, &any_ms);
14952         if (ret)
14953                 goto fail;
14954
14955         if (any_ms) {
14956                 ret = intel_modeset_calc_cdclk(state);
14957                 if (ret)
14958                         return ret;
14959
14960                 intel_modeset_clear_plls(state);
14961         }
14962
14963         ret = intel_atomic_check_crtcs(state);
14964         if (ret)
14965                 goto fail;
14966
14967         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14968                                             new_crtc_state, i) {
14969                 if (!needs_modeset(new_crtc_state) &&
14970                     !new_crtc_state->update_pipe)
14971                         continue;
14972
14973                 intel_dump_pipe_config(new_crtc_state, state,
14974                                        needs_modeset(new_crtc_state) ?
14975                                        "[modeset]" : "[fastset]");
14976         }
14977
14978         return 0;
14979
14980  fail:
14981         if (ret == -EDEADLK)
14982                 return ret;
14983
14984         /*
14985          * FIXME would probably be nice to know which crtc specifically
14986          * caused the failure, in cases where we can pinpoint it.
14987          */
14988         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
14989                                             new_crtc_state, i)
14990                 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
14991
14992         return ret;
14993 }
14994
14995 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
14996 {
14997         struct intel_crtc_state *crtc_state;
14998         struct intel_crtc *crtc;
14999         int i, ret;
15000
15001         ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
15002         if (ret < 0)
15003                 return ret;
15004
15005         for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
15006                 bool mode_changed = needs_modeset(crtc_state);
15007
15008                 if (mode_changed || crtc_state->update_pipe ||
15009                     crtc_state->uapi.color_mgmt_changed) {
15010                         intel_dsb_prepare(crtc_state);
15011                 }
15012         }
15013
15014         return 0;
15015 }
15016
15017 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
15018 {
15019         struct drm_device *dev = crtc->base.dev;
15020         struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
15021
15022         if (!vblank->max_vblank_count)
15023                 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
15024
15025         return crtc->base.funcs->get_vblank_counter(&crtc->base);
15026 }
15027
15028 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
15029                                   struct intel_crtc_state *crtc_state)
15030 {
15031         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
15032
15033         if (!IS_GEN(dev_priv, 2) || crtc_state->active_planes)
15034                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
15035
15036         if (crtc_state->has_pch_encoder) {
15037                 enum pipe pch_transcoder =
15038                         intel_crtc_pch_transcoder(crtc);
15039
15040                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
15041         }
15042 }
15043
/*
 * Program the pipe state that can legally change without a full modeset
 * (a "fastset"): pipe source size, panel fitter, linetime watermark and
 * pipe chicken bits. Called from the non-modeset path of
 * commit_pipe_config() when new_crtc_state->update_pipe is set.
 */
static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
                               const struct intel_crtc_state *new_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        /*
         * Update pipe size and adjust fitter if needed: the reason for this is
         * that in compute_mode_changes we check the native mode (not the pfit
         * mode) to see if we can flip rather than do a full mode set. In the
         * fastboot case, we'll flip, but if we don't update the pipesrc and
         * pfit state, we'll end up with a big fb scanned out into the wrong
         * sized surface.
         */
        intel_set_pipe_src_size(new_crtc_state);

        /* on skylake this is done by detaching scalers */
        if (INTEL_GEN(dev_priv) >= 9) {
                skl_detach_scalers(new_crtc_state);

                if (new_crtc_state->pch_pfit.enabled)
                        skl_pfit_enable(new_crtc_state);
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                /* ILK-style pfit: enable, or disable if it was on before. */
                if (new_crtc_state->pch_pfit.enabled)
                        ilk_pfit_enable(new_crtc_state);
                else if (old_crtc_state->pch_pfit.enabled)
                        ilk_pfit_disable(old_crtc_state);
        }

        /*
         * The register is supposedly single buffered so perhaps
         * not 100% correct to do this here. But SKL+ calculate
         * this based on the adjust pixel rate so pfit changes do
         * affect it and so it must be updated for fastsets.
         * HSW/BDW only really need this here for fastboot, after
         * that the value should not change without a full modeset.
         */
        if (INTEL_GEN(dev_priv) >= 9 ||
            IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
                hsw_set_linetime_wm(new_crtc_state);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_set_pipe_chicken(crtc);
}
15088
/*
 * Commit the per-pipe (non-plane) register state for @crtc and kick the
 * watermark update. For full modesets the pipe registers were already
 * programmed when the CRTC was enabled, so only the fastset/color paths
 * run here. Runs inside the vblank evasion window of intel_update_crtc().
 */
static void commit_pipe_config(struct intel_atomic_state *state,
                               struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        bool modeset = needs_modeset(new_crtc_state);

        /*
         * During modesets pipe configuration was programmed as the
         * CRTC was enabled.
         */
        if (!modeset) {
                if (new_crtc_state->uapi.color_mgmt_changed ||
                    new_crtc_state->update_pipe)
                        intel_color_commit(new_crtc_state);

                /* SKL+: scalers must be detached before plane updates. */
                if (INTEL_GEN(dev_priv) >= 9)
                        skl_detach_scalers(new_crtc_state);

                if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
                        bdw_set_pipemisc(new_crtc_state);

                if (new_crtc_state->update_pipe)
                        intel_pipe_fastset(old_crtc_state, new_crtc_state);
        }

        /* Applies to both modeset and fastset commits. */
        if (dev_priv->display.atomic_update_watermarks)
                dev_priv->display.atomic_update_watermarks(state, crtc);
}
15121
15122 static void intel_enable_crtc(struct intel_atomic_state *state,
15123                               struct intel_crtc *crtc)
15124 {
15125         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
15126         const struct intel_crtc_state *new_crtc_state =
15127                 intel_atomic_get_new_crtc_state(state, crtc);
15128
15129         if (!needs_modeset(new_crtc_state))
15130                 return;
15131
15132         intel_crtc_update_active_timings(new_crtc_state);
15133
15134         dev_priv->display.crtc_enable(state, crtc);
15135
15136         /* vblanks work again, re-enable pipe CRC. */
15137         intel_crtc_enable_pipe_crc(crtc);
15138 }
15139
/*
 * Perform the per-CRTC plane/pipe update for @crtc: pre-plane work,
 * FBC state, then the vblank-evaded critical section (pipe config +
 * plane programming). Handles both the fastset path (!modeset) and the
 * post-enable updates of a full modeset.
 */
static void intel_update_crtc(struct intel_atomic_state *state,
                              struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        bool modeset = needs_modeset(new_crtc_state);

        if (!modeset) {
                /* Pre-load LUTs ahead of the commit when requested. */
                if (new_crtc_state->preload_luts &&
                    (new_crtc_state->uapi.color_mgmt_changed ||
                     new_crtc_state->update_pipe))
                        intel_color_load_luts(new_crtc_state);

                intel_pre_plane_update(state, crtc);

                if (new_crtc_state->update_pipe)
                        intel_encoders_update_pipe(state, crtc);
        }

        if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
                intel_fbc_disable(crtc);
        else
                intel_fbc_enable(state, crtc);

        /* Perform vblank evasion around commit operation */
        intel_pipe_update_start(new_crtc_state);

        commit_pipe_config(state, crtc);

        if (INTEL_GEN(dev_priv) >= 9)
                skl_update_planes_on_crtc(state, crtc);
        else
                i9xx_update_planes_on_crtc(state, crtc);

        intel_pipe_update_end(new_crtc_state);

        /*
         * We usually enable FIFO underrun interrupts as part of the
         * CRTC enable sequence during modesets.  But when we inherit a
         * valid pipe configuration from the BIOS we need to take care
         * of enabling them on the CRTC's first fastset.
         */
        if (new_crtc_state->update_pipe && !modeset &&
            old_crtc_state->inherited)
                intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
15189
15190
/*
 * Fully disable a CRTC as part of a modeset: planes, pipe CRC, the pipe
 * itself, FBC and its shared DPLL. The ordering here is deliberate and
 * must not change (see the CRC comment below).
 */
static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
                                          struct intel_crtc_state *old_crtc_state,
                                          struct intel_crtc_state *new_crtc_state,
                                          struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);

        intel_crtc_disable_planes(state, crtc);

        /*
         * We need to disable pipe CRC before disabling the pipe,
         * or we race against vblank off.
         */
        intel_crtc_disable_pipe_crc(crtc);

        dev_priv->display.crtc_disable(state, crtc);
        crtc->active = false;
        intel_fbc_disable(crtc);
        intel_disable_shared_dpll(old_crtc_state);

        /* FIXME unify this for all platforms */
        if (!new_crtc_state->hw.active &&
            !HAS_GMCH(dev_priv) &&
            dev_priv->display.initial_watermarks)
                dev_priv->display.initial_watermarks(state, crtc);
}
15217
/*
 * Disable every CRTC that undergoes a full modeset, in two passes:
 * first transcoder port sync / MST slaves (which must go down before
 * their masters), then everything else. @handled tracks pipes already
 * disabled in pass one so pass two skips them.
 */
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
{
        struct intel_crtc_state *new_crtc_state, *old_crtc_state;
        struct intel_crtc *crtc;
        u32 handled = 0;
        int i;

        /* Only disable port sync and MST slaves */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                if (!needs_modeset(new_crtc_state))
                        continue;

                /* Nothing to disable if the pipe wasn't running. */
                if (!old_crtc_state->hw.active)
                        continue;

                /* In case of Transcoder port Sync master slave CRTCs can be
                 * assigned in any order and we need to make sure that
                 * slave CRTCs are disabled first and then master CRTC since
                 * Slave vblanks are masked till Master Vblanks.
                 */
                if (!is_trans_port_sync_slave(old_crtc_state) &&
                    !intel_dp_mst_is_slave_trans(old_crtc_state))
                        continue;

                intel_pre_plane_update(state, crtc);
                intel_old_crtc_state_disables(state, old_crtc_state,
                                              new_crtc_state, crtc);
                handled |= BIT(crtc->pipe);
        }

        /* Disable everything else left on */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                if (!needs_modeset(new_crtc_state) ||
                    (handled & BIT(crtc->pipe)))
                        continue;

                intel_pre_plane_update(state, crtc);
                if (old_crtc_state->hw.active)
                        intel_old_crtc_state_disables(state, old_crtc_state,
                                                      new_crtc_state, crtc);
        }
}
15262
15263 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
15264 {
15265         struct intel_crtc_state *new_crtc_state;
15266         struct intel_crtc *crtc;
15267         int i;
15268
15269         for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
15270                 if (!new_crtc_state->hw.active)
15271                         continue;
15272
15273                 intel_enable_crtc(state, crtc);
15274                 intel_update_crtc(state, crtc);
15275         }
15276 }
15277
/*
 * SKL+ commit_modeset_enables() hook. Enables/updates CRTCs in an order
 * that guarantees two pipes never scan out from overlapping DDB (data
 * buffer) allocations at the same time:
 *
 *  1) update all non-modeset ("fastset") pipes, deferring any pipe whose
 *     new DDB still overlaps another pipe's current allocation;
 *  2) enable modeset pipes without external dependencies;
 *  3) enable dependent modeset pipes (MST slaves, port sync masters);
 *  4) do the plane updates for the freshly enabled pipes, again
 *     respecting DDB overlap.
 *
 * entries[] tracks each pipe's currently-committed DDB allocation.
 */
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
        struct intel_crtc *crtc;
        struct intel_crtc_state *old_crtc_state, *new_crtc_state;
        struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
        u8 update_pipes = 0, modeset_pipes = 0;
        int i;

        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                enum pipe pipe = crtc->pipe;

                if (!new_crtc_state->hw.active)
                        continue;

                /* ignore allocations for crtc's that have been turned off. */
                if (!needs_modeset(new_crtc_state)) {
                        entries[pipe] = old_crtc_state->wm.skl.ddb;
                        update_pipes |= BIT(pipe);
                } else {
                        modeset_pipes |= BIT(pipe);
                }
        }

        /*
         * Whenever the number of active pipes changes, we need to make sure we
         * update the pipes in the right order so that their ddb allocations
         * never overlap with each other between CRTC updates. Otherwise we'll
         * cause pipe underruns and other bad stuff.
         *
         * So first lets enable all pipes that do not need a fullmodeset as
         * those don't have any external dependency.
         */
        while (update_pipes) {
                for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                                    new_crtc_state, i) {
                        enum pipe pipe = crtc->pipe;

                        if ((update_pipes & BIT(pipe)) == 0)
                                continue;

                        /* Defer this pipe until the conflicting one moved. */
                        if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
                                                        entries, I915_MAX_PIPES, pipe))
                                continue;

                        entries[pipe] = new_crtc_state->wm.skl.ddb;
                        update_pipes &= ~BIT(pipe);

                        intel_update_crtc(state, crtc);

                        /*
                         * If this is an already active pipe, it's DDB changed,
                         * and this isn't the last pipe that needs updating
                         * then we need to wait for a vblank to pass for the
                         * new ddb allocation to take effect.
                         */
                        if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
                                                 &old_crtc_state->wm.skl.ddb) &&
                            (update_pipes | modeset_pipes))
                                intel_wait_for_vblank(dev_priv, pipe);
                }
        }

        /* Phase 2+4 below track the modeset pipes' plane updates. */
        update_pipes = modeset_pipes;

        /*
         * Enable all pipes that needs a modeset and do not depends on other
         * pipes
         */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                enum pipe pipe = crtc->pipe;

                if ((modeset_pipes & BIT(pipe)) == 0)
                        continue;

                if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
                    is_trans_port_sync_master(new_crtc_state))
                        continue;

                modeset_pipes &= ~BIT(pipe);

                intel_enable_crtc(state, crtc);
        }

        /*
         * Then we enable all remaining pipes that depend on other
         * pipes: MST slaves and port sync masters.
         */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                enum pipe pipe = crtc->pipe;

                if ((modeset_pipes & BIT(pipe)) == 0)
                        continue;

                modeset_pipes &= ~BIT(pipe);

                intel_enable_crtc(state, crtc);
        }

        /*
         * Finally we do the plane updates/etc. for all pipes that got enabled.
         */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                enum pipe pipe = crtc->pipe;

                if ((update_pipes & BIT(pipe)) == 0)
                        continue;

                /* Phase 1 must have cleared any conflicting allocation. */
                drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
                                                                        entries, I915_MAX_PIPES, pipe));

                entries[pipe] = new_crtc_state->wm.skl.ddb;
                update_pipes &= ~BIT(pipe);

                intel_update_crtc(state, crtc);
        }

        /* Every pipe must have been handled by one of the phases above. */
        drm_WARN_ON(&dev_priv->drm, modeset_pipes);
        drm_WARN_ON(&dev_priv->drm, update_pipes);
}
15398
15399 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
15400 {
15401         struct intel_atomic_state *state, *next;
15402         struct llist_node *freed;
15403
15404         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
15405         llist_for_each_entry_safe(state, next, freed, freed)
15406                 drm_atomic_state_put(&state->base);
15407 }
15408
15409 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
15410 {
15411         struct drm_i915_private *dev_priv =
15412                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
15413
15414         intel_atomic_helper_free_state(dev_priv);
15415 }
15416
/*
 * Block until the commit's sw fence signals (all dependencies done) OR
 * a GPU reset that needs the modeset lock is pending. Waiting on both
 * queues simultaneously avoids deadlocking against a reset that would
 * otherwise never complete while we hold commit resources.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
        struct wait_queue_entry wait_fence, wait_reset;
        struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

        init_wait_entry(&wait_fence, 0);
        init_wait_entry(&wait_reset, 0);
        for (;;) {
                /* Queue ourselves on both waitqueues before re-checking. */
                prepare_to_wait(&intel_state->commit_ready.wait,
                                &wait_fence, TASK_UNINTERRUPTIBLE);
                prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
                                              I915_RESET_MODESET),
                                &wait_reset, TASK_UNINTERRUPTIBLE);


                /* Exit when either the fence fired or a reset is pending. */
                if (i915_sw_fence_done(&intel_state->commit_ready) ||
                    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
                        break;

                schedule();
        }
        finish_wait(&intel_state->commit_ready.wait, &wait_fence);
        finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
                                  I915_RESET_MODESET),
                    &wait_reset);
}
15443
15444 static void intel_cleanup_dsbs(struct intel_atomic_state *state)
15445 {
15446         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
15447         struct intel_crtc *crtc;
15448         int i;
15449
15450         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
15451                                             new_crtc_state, i)
15452                 intel_dsb_cleanup(old_crtc_state);
15453 }
15454
/*
 * Deferred commit cleanup, run on system_highpri_wq after commit_tail:
 * free DSBs, clean up planes, signal cleanup_done and drop the final
 * state reference, then drain the helper free list. The call order
 * follows the drm_atomic_helper commit teardown contract.
 */
static void intel_atomic_cleanup_work(struct work_struct *work)
{
        struct intel_atomic_state *state =
                container_of(work, struct intel_atomic_state, base.commit_work);
        struct drm_i915_private *i915 = to_i915(state->base.dev);

        intel_cleanup_dsbs(state);
        drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
        drm_atomic_helper_commit_cleanup_done(&state->base);
        drm_atomic_state_put(&state->base);

        intel_atomic_helper_free_state(i915);
}
15468
/*
 * The heavy-lifting half of an atomic commit, run after all fences and
 * dependencies are ready (possibly from a worker for nonblocking
 * commits). Executes the strictly ordered sequence: disables, cdclk
 * pre-update, enables, waiting for flips, optimized watermarks,
 * power-domain release, and finally schedules the cleanup worker.
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
        struct drm_device *dev = state->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *new_crtc_state, *old_crtc_state;
        struct intel_crtc *crtc;
        u64 put_domains[I915_MAX_PIPES] = {};
        intel_wakeref_t wakeref = 0;
        int i;

        intel_atomic_commit_fence_wait(state);

        drm_atomic_helper_wait_for_dependencies(&state->base);

        if (state->modeset)
                wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

        /* Grab the power domains each touched CRTC needs for the commit. */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                if (needs_modeset(new_crtc_state) ||
                    new_crtc_state->update_pipe) {

                        put_domains[crtc->pipe] =
                                modeset_get_crtc_power_domains(new_crtc_state);
                }
        }

        intel_commit_modeset_disables(state);

        /* FIXME: Eventually get rid of our crtc->config pointer */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
                crtc->config = new_crtc_state;

        if (state->modeset) {
                drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

                intel_set_cdclk_pre_plane_update(state);

                intel_modeset_verify_disabled(dev_priv, state);
        }

        intel_sagv_pre_plane_update(state);

        /* Complete the events for pipes that have now been disabled */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                bool modeset = needs_modeset(new_crtc_state);

                /* Complete events for now disable pipes here. */
                if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
                        spin_lock_irq(&dev->event_lock);
                        drm_crtc_send_vblank_event(&crtc->base,
                                                   new_crtc_state->uapi.event);
                        spin_unlock_irq(&dev->event_lock);

                        new_crtc_state->uapi.event = NULL;
                }
        }

        if (state->modeset)
                intel_encoders_update_prepare(state);

        intel_dbuf_pre_plane_update(state);

        /* Now enable the clocks, plane, pipe, and connectors that we set up. */
        dev_priv->display.commit_modeset_enables(state);

        if (state->modeset) {
                intel_encoders_update_complete(state);

                intel_set_cdclk_post_plane_update(state);
        }

        /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
         * already, but still need the state for the delayed optimization. To
         * fix this:
         * - wrap the optimization/post_plane_update stuff into a per-crtc work.
         * - schedule that vblank worker _before_ calling hw_done
         * - at the start of commit_tail, cancel it _synchrously
         * - switch over to the vblank wait helper in the core after that since
         *   we don't need out special handling any more.
         */
        drm_atomic_helper_wait_for_flip_done(dev, &state->base);

        /* Load LUTs that were deliberately deferred past the flip. */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                if (new_crtc_state->hw.active &&
                    !needs_modeset(new_crtc_state) &&
                    !new_crtc_state->preload_luts &&
                    (new_crtc_state->uapi.color_mgmt_changed ||
                     new_crtc_state->update_pipe))
                        intel_color_load_luts(new_crtc_state);
        }

        /*
         * Now that the vblank has passed, we can go ahead and program the
         * optimal watermarks on platforms that need two-step watermark
         * programming.
         *
         * TODO: Move this (and other cleanup) to an async worker eventually.
         */
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                /*
                 * Gen2 reports pipe underruns whenever all planes are disabled.
                 * So re-enable underrun reporting after some planes get enabled.
                 *
                 * We do this before .optimize_watermarks() so that we have a
                 * chance of catching underruns with the intermediate watermarks
                 * vs. the new plane configuration.
                 */
                if (IS_GEN(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
                        intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);

                if (dev_priv->display.optimize_watermarks)
                        dev_priv->display.optimize_watermarks(state, crtc);
        }

        intel_dbuf_post_plane_update(state);

        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                intel_post_plane_update(state, crtc);

                /*
                 * NOTE(review): put_domains was filled by crtc->pipe above
                 * but is read by the state index i here — this relies on the
                 * CRTC's drm index matching its pipe; confirm that assumption.
                 */
                if (put_domains[i])
                        modeset_put_power_domains(dev_priv, put_domains[i]);

                intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);

                /*
                 * DSB cleanup is done in cleanup_work aligning with framebuffer
                 * cleanup. So copy and reset the dsb structure to sync with
                 * commit_done and later do dsb cleanup in cleanup_work.
                 */
                old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
        }

        /* Underruns don't always raise interrupts, so check manually */
        intel_check_cpu_fifo_underruns(dev_priv);
        intel_check_pch_fifo_underruns(dev_priv);

        if (state->modeset)
                intel_verify_planes(state);

        intel_sagv_post_plane_update(state);

        drm_atomic_helper_commit_hw_done(&state->base);

        if (state->modeset) {
                /* As one of the primary mmio accessors, KMS has a high
                 * likelihood of triggering bugs in unclaimed access. After we
                 * finish modesetting, see if an error has been flagged, and if
                 * so enable debugging for the next modeset - and hope we catch
                 * the culprit.
                 */
                intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
                intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
        }
        intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

        /*
         * Defer the cleanup of the old state to a separate worker to not
         * impede the current task (userspace for blocking modesets) that
         * are executed inline. For out-of-line asynchronous modesets/flips,
         * deferring to a new worker seems overkill, but we would place a
         * schedule point (cond_resched()) here anyway to keep latencies
         * down.
         */
        INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
        queue_work(system_highpri_wq, &state->base.commit_work);
}
15637
15638 static void intel_atomic_commit_work(struct work_struct *work)
15639 {
15640         struct intel_atomic_state *state =
15641                 container_of(work, struct intel_atomic_state, base.commit_work);
15642
15643         intel_atomic_commit_tail(state);
15644 }
15645
15646 static int __i915_sw_fence_call
15647 intel_atomic_commit_ready(struct i915_sw_fence *fence,
15648                           enum i915_sw_fence_notify notify)
15649 {
15650         struct intel_atomic_state *state =
15651                 container_of(fence, struct intel_atomic_state, commit_ready);
15652
15653         switch (notify) {
15654         case FENCE_COMPLETE:
15655                 /* we do blocking waits in the worker, nothing to do here */
15656                 break;
15657         case FENCE_FREE:
15658                 {
15659                         struct intel_atomic_helper *helper =
15660                                 &to_i915(state->base.dev)->atomic_helper;
15661
15662                         if (llist_add(&state->freed, &helper->free_list))
15663                                 schedule_work(&helper->free_work);
15664                         break;
15665                 }
15666         }
15667
15668         return NOTIFY_DONE;
15669 }
15670
15671 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
15672 {
15673         struct intel_plane_state *old_plane_state, *new_plane_state;
15674         struct intel_plane *plane;
15675         int i;
15676
15677         for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
15678                                              new_plane_state, i)
15679                 intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
15680                                         to_intel_frontbuffer(new_plane_state->hw.fb),
15681                                         plane->frontbuffer_bit);
15682 }
15683
/*
 * Assert that the modeset lock of every CRTC is held; used to guard
 * updates to device-global state (see intel_atomic_commit()).
 */
static void assert_global_state_locked(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc)
		drm_modeset_lock_assert_held(&crtc->base.mutex);
}
15691
/*
 * intel_atomic_commit - the drm_mode_config_funcs.atomic_commit hook.
 * @dev: drm device
 * @_state: the atomic state to commit
 * @nonblock: whether the commit may return before the hardware update is done
 *
 * Takes a runtime PM wakeref for the duration of the commit, sets up the
 * commit-ready sw fence, swaps in the new state and then either runs the
 * commit tail synchronously or queues it on the appropriate workqueue.
 *
 * Returns 0 on success, a negative error code on failure (in which case
 * all acquired references/wakerefs are dropped again).
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *_state,
			       bool nonblock)
{
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	/* Released by the commit tail / error paths below. */
	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	drm_atomic_state_get(&state->base);
	i915_sw_fence_init(&state->commit_ready,
			   intel_atomic_commit_ready);

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Unset state->legacy_cursor_update before the call to
	 * drm_atomic_helper_setup_commit() because otherwise
	 * drm_atomic_helper_wait_for_flip_done() is a noop and
	 * we get FIFO underruns because we didn't wait
	 * for vblank.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			if (new_crtc_state->wm.need_postvbl_update ||
			    new_crtc_state->update_wm_post)
				state->base.legacy_cursor_update = false;
	}

	ret = intel_atomic_prepare_commit(state);
	if (ret) {
		drm_dbg_atomic(&dev_priv->drm,
			       "Preparing state failed with %i\n", ret);
		i915_sw_fence_commit(&state->commit_ready);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
	if (!ret)
		ret = drm_atomic_helper_swap_state(&state->base, true);
	if (!ret)
		intel_atomic_swap_global_state(state);

	if (ret) {
		struct intel_crtc_state *new_crtc_state;
		struct intel_crtc *crtc;
		int i;

		/* Commit the fence so waiters are not left hanging. */
		i915_sw_fence_commit(&state->commit_ready);

		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
			intel_dsb_cleanup(new_crtc_state);

		drm_atomic_helper_cleanup_planes(dev, &state->base);
		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
		return ret;
	}
	dev_priv->wm.distrust_bios_wm = false;
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	if (state->global_state_changed) {
		/* Global state requires all CRTC locks to be held. */
		assert_global_state_locked(dev_priv);

		dev_priv->active_pipes = state->active_pipes;
	}

	/* Reference dropped by the commit tail's cleanup work. */
	drm_atomic_state_get(&state->base);
	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);

	i915_sw_fence_commit(&state->commit_ready);
	if (nonblock && state->modeset) {
		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
	} else if (nonblock) {
		queue_work(dev_priv->flip_wq, &state->base.commit_work);
	} else {
		/* Blocking modesets must not overtake queued modeset work. */
		if (state->modeset)
			flush_workqueue(dev_priv->modeset_wq);
		intel_atomic_commit_tail(state);
	}

	return 0;
}
15789
/* Waitqueue entry used to boost RPS if a flip misses its vblank. */
struct wait_rps_boost {
	struct wait_queue_entry wait;

	struct drm_crtc *crtc;		/* crtc whose vblank we wait on */
	struct i915_request *request;	/* request to boost; holds a reference */
};
15796
/*
 * Vblank waitqueue callback: boost RPS for a request that has not yet
 * started running, then drop all references taken when the wait was added.
 * Returns 1 to indicate the waiter was handled.
 */
static int do_rps_boost(struct wait_queue_entry *_wait,
			unsigned mode, int sync, void *key)
{
	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
	struct i915_request *rq = wait->request;

	/*
	 * If we missed the vblank, but the request is already running it
	 * is reasonable to assume that it will complete before the next
	 * vblank without our intervention, so leave RPS alone.
	 */
	if (!i915_request_started(rq))
		intel_rps_boost(rq);
	i915_request_put(rq);

	/* Release the vblank reference taken in add_rps_boost_after_vblank(). */
	drm_crtc_vblank_put(wait->crtc);

	list_del(&wait->wait.entry);
	kfree(wait);
	return 1;
}
15818
/*
 * Arm an RPS boost that fires from the crtc's vblank waitqueue; see
 * do_rps_boost(), which consumes the vblank and request references.
 * Silently does nothing if the fence is not an i915 request, the
 * platform is pre-gen6, or a vblank reference cannot be obtained.
 */
static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
				       struct dma_fence *fence)
{
	struct wait_rps_boost *wait;

	if (!dma_fence_is_i915(fence))
		return;

	if (INTEL_GEN(to_i915(crtc->dev)) < 6)
		return;

	if (drm_crtc_vblank_get(crtc))
		return;

	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait) {
		/* Allocation failed: give back the vblank reference. */
		drm_crtc_vblank_put(crtc);
		return;
	}

	/* References released by do_rps_boost() when the waiter fires. */
	wait->request = to_request(dma_fence_get(fence));
	wait->crtc = crtc;

	wait->wait.func = do_rps_boost;
	wait->wait.flags = 0;

	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
}
15847
/*
 * Pin the plane's framebuffer for scanout and store the resulting vma in
 * @plane_state->vma. Cursor planes on platforms that need a physical
 * address get their backing object attached to physical memory first.
 *
 * Returns 0 on success, a negative error code on failure.
 */
static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	struct drm_framebuffer *fb = plane_state->hw.fb;
	struct i915_vma *vma;

	if (plane->id == PLANE_CURSOR &&
	    INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
		const int align = intel_cursor_alignment(dev_priv);
		int err;

		err = i915_gem_object_attach_phys(obj, align);
		if (err)
			return err;
	}

	vma = intel_pin_and_fence_fb_obj(fb,
					 &plane_state->view,
					 intel_plane_uses_fence(plane_state),
					 &plane_state->flags);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* Unpinned again via intel_plane_unpin_fb(). */
	plane_state->vma = vma;

	return 0;
}
15877
15878 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
15879 {
15880         struct i915_vma *vma;
15881
15882         vma = fetch_and_zero(&old_plane_state->vma);
15883         if (vma)
15884                 intel_unpin_fb_vma(vma, old_plane_state->flags);
15885 }
15886
/*
 * Raise the scheduling priority of work rendering into a framebuffer
 * object that is about to be displayed, so it completes sooner.
 */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
	struct i915_sched_attr attr = {
		.priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
	};

	i915_gem_object_wait_priority(obj, 0, &attr);
}
15895
15896 /**
15897  * intel_prepare_plane_fb - Prepare fb for usage on plane
15898  * @_plane: drm plane to prepare for
15899  * @_new_plane_state: the plane state being prepared
15900  *
15901  * Prepares a framebuffer for usage on a display plane.  Generally this
15902  * involves pinning the underlying object and updating the frontbuffer tracking
15903  * bits.  Some older platforms need special physical address handling for
15904  * cursor planes.
15905  *
15906  * Returns 0 on success, negative error code on failure.
15907  */
15908 int
15909 intel_prepare_plane_fb(struct drm_plane *_plane,
15910                        struct drm_plane_state *_new_plane_state)
15911 {
15912         struct intel_plane *plane = to_intel_plane(_plane);
15913         struct intel_plane_state *new_plane_state =
15914                 to_intel_plane_state(_new_plane_state);
15915         struct intel_atomic_state *state =
15916                 to_intel_atomic_state(new_plane_state->uapi.state);
15917         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15918         const struct intel_plane_state *old_plane_state =
15919                 intel_atomic_get_old_plane_state(state, plane);
15920         struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
15921         struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
15922         int ret;
15923
15924         if (old_obj) {
15925                 const struct intel_crtc_state *crtc_state =
15926                         intel_atomic_get_new_crtc_state(state,
15927                                                         to_intel_crtc(old_plane_state->hw.crtc));
15928
15929                 /* Big Hammer, we also need to ensure that any pending
15930                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
15931                  * current scanout is retired before unpinning the old
15932                  * framebuffer. Note that we rely on userspace rendering
15933                  * into the buffer attached to the pipe they are waiting
15934                  * on. If not, userspace generates a GPU hang with IPEHR
15935                  * point to the MI_WAIT_FOR_EVENT.
15936                  *
15937                  * This should only fail upon a hung GPU, in which case we
15938                  * can safely continue.
15939                  */
15940                 if (needs_modeset(crtc_state)) {
15941                         ret = i915_sw_fence_await_reservation(&state->commit_ready,
15942                                                               old_obj->base.resv, NULL,
15943                                                               false, 0,
15944                                                               GFP_KERNEL);
15945                         if (ret < 0)
15946                                 return ret;
15947                 }
15948         }
15949
15950         if (new_plane_state->uapi.fence) { /* explicit fencing */
15951                 ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
15952                                                     new_plane_state->uapi.fence,
15953                                                     i915_fence_timeout(dev_priv),
15954                                                     GFP_KERNEL);
15955                 if (ret < 0)
15956                         return ret;
15957         }
15958
15959         if (!obj)
15960                 return 0;
15961
15962         ret = i915_gem_object_pin_pages(obj);
15963         if (ret)
15964                 return ret;
15965
15966         ret = intel_plane_pin_fb(new_plane_state);
15967
15968         i915_gem_object_unpin_pages(obj);
15969         if (ret)
15970                 return ret;
15971
15972         fb_obj_bump_render_priority(obj);
15973         i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
15974
15975         if (!new_plane_state->uapi.fence) { /* implicit fencing */
15976                 struct dma_fence *fence;
15977
15978                 ret = i915_sw_fence_await_reservation(&state->commit_ready,
15979                                                       obj->base.resv, NULL,
15980                                                       false,
15981                                                       i915_fence_timeout(dev_priv),
15982                                                       GFP_KERNEL);
15983                 if (ret < 0)
15984                         goto unpin_fb;
15985
15986                 fence = dma_resv_get_excl_rcu(obj->base.resv);
15987                 if (fence) {
15988                         add_rps_boost_after_vblank(new_plane_state->hw.crtc,
15989                                                    fence);
15990                         dma_fence_put(fence);
15991                 }
15992         } else {
15993                 add_rps_boost_after_vblank(new_plane_state->hw.crtc,
15994                                            new_plane_state->uapi.fence);
15995         }
15996
15997         /*
15998          * We declare pageflips to be interactive and so merit a small bias
15999          * towards upclocking to deliver the frame on time. By only changing
16000          * the RPS thresholds to sample more regularly and aim for higher
16001          * clocks we can hopefully deliver low power workloads (like kodi)
16002          * that are not quite steady state without resorting to forcing
16003          * maximum clocks following a vblank miss (see do_rps_boost()).
16004          */
16005         if (!state->rps_interactive) {
16006                 intel_rps_mark_interactive(&dev_priv->gt.rps, true);
16007                 state->rps_interactive = true;
16008         }
16009
16010         return 0;
16011
16012 unpin_fb:
16013         intel_plane_unpin_fb(new_plane_state);
16014
16015         return ret;
16016 }
16017
16018 /**
16019  * intel_cleanup_plane_fb - Cleans up an fb after plane use
16020  * @plane: drm plane to clean up for
16021  * @_old_plane_state: the state from the previous modeset
16022  *
16023  * Cleans up a framebuffer that has just been removed from a plane.
16024  */
16025 void
16026 intel_cleanup_plane_fb(struct drm_plane *plane,
16027                        struct drm_plane_state *_old_plane_state)
16028 {
16029         struct intel_plane_state *old_plane_state =
16030                 to_intel_plane_state(_old_plane_state);
16031         struct intel_atomic_state *state =
16032                 to_intel_atomic_state(old_plane_state->uapi.state);
16033         struct drm_i915_private *dev_priv = to_i915(plane->dev);
16034         struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
16035
16036         if (!obj)
16037                 return;
16038
16039         if (state->rps_interactive) {
16040                 intel_rps_mark_interactive(&dev_priv->gt.rps, false);
16041                 state->rps_interactive = false;
16042         }
16043
16044         /* Should only be called after a successful intel_prepare_plane_fb()! */
16045         intel_plane_unpin_fb(old_plane_state);
16046 }
16047
16048 /**
16049  * intel_plane_destroy - destroy a plane
16050  * @plane: plane to destroy
16051  *
16052  * Common destruction function for all types of planes (primary, cursor,
16053  * sprite).
16054  */
16055 void intel_plane_destroy(struct drm_plane *plane)
16056 {
16057         drm_plane_cleanup(plane);
16058         kfree(to_intel_plane(plane));
16059 }
16060
16061 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
16062                                             u32 format, u64 modifier)
16063 {
16064         switch (modifier) {
16065         case DRM_FORMAT_MOD_LINEAR:
16066         case I915_FORMAT_MOD_X_TILED:
16067                 break;
16068         default:
16069                 return false;
16070         }
16071
16072         switch (format) {
16073         case DRM_FORMAT_C8:
16074         case DRM_FORMAT_RGB565:
16075         case DRM_FORMAT_XRGB1555:
16076         case DRM_FORMAT_XRGB8888:
16077                 return modifier == DRM_FORMAT_MOD_LINEAR ||
16078                         modifier == I915_FORMAT_MOD_X_TILED;
16079         default:
16080                 return false;
16081         }
16082 }
16083
16084 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
16085                                             u32 format, u64 modifier)
16086 {
16087         switch (modifier) {
16088         case DRM_FORMAT_MOD_LINEAR:
16089         case I915_FORMAT_MOD_X_TILED:
16090                 break;
16091         default:
16092                 return false;
16093         }
16094
16095         switch (format) {
16096         case DRM_FORMAT_C8:
16097         case DRM_FORMAT_RGB565:
16098         case DRM_FORMAT_XRGB8888:
16099         case DRM_FORMAT_XBGR8888:
16100         case DRM_FORMAT_ARGB8888:
16101         case DRM_FORMAT_ABGR8888:
16102         case DRM_FORMAT_XRGB2101010:
16103         case DRM_FORMAT_XBGR2101010:
16104         case DRM_FORMAT_ARGB2101010:
16105         case DRM_FORMAT_ABGR2101010:
16106         case DRM_FORMAT_XBGR16161616F:
16107                 return modifier == DRM_FORMAT_MOD_LINEAR ||
16108                         modifier == I915_FORMAT_MOD_X_TILED;
16109         default:
16110                 return false;
16111         }
16112 }
16113
16114 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
16115                                               u32 format, u64 modifier)
16116 {
16117         return modifier == DRM_FORMAT_MOD_LINEAR &&
16118                 format == DRM_FORMAT_ARGB8888;
16119 }
16120
/* drm_plane_funcs vtable for gen4+ primary planes. */
static const struct drm_plane_funcs i965_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i965_plane_format_mod_supported,
};
16129
/* drm_plane_funcs vtable for gen2/3 primary planes. */
static const struct drm_plane_funcs i8xx_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i8xx_plane_format_mod_supported,
};
16138
/*
 * Legacy cursor ioctl fast path: update the cursor plane without a full
 * atomic commit (and so without vblank waits) when only the fb and/or
 * position change. Falls back to drm_atomic_helper_update_plane() (the
 * "slow" path) whenever a full commit is required for correctness.
 */
static int
intel_legacy_cursor_update(struct drm_plane *_plane,
			   struct drm_crtc *_crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   u32 src_x, u32 src_y,
			   u32 src_w, u32 src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_plane *plane = to_intel_plane(_plane);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->base.state);
	struct intel_plane_state *new_plane_state;
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_crtc_state *new_crtc_state;
	int ret;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->hw.active || needs_modeset(crtc_state) ||
	    crtc_state->update_pipe)
		goto slow;

	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane.  This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->uapi.commit &&
	    !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->uapi.crtc != &crtc->base ||
	    old_plane_state->uapi.src_w != src_w ||
	    old_plane_state->uapi.src_h != src_h ||
	    old_plane_state->uapi.crtc_w != crtc_w ||
	    old_plane_state->uapi.crtc_h != crtc_h ||
	    !old_plane_state->uapi.fb != !fb)
		goto slow;

	new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base));
	if (!new_plane_state)
		return -ENOMEM;

	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base));
	if (!new_crtc_state) {
		ret = -ENOMEM;
		goto out_free;
	}

	drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb);

	/* Fill in the new uapi coordinates requested by the ioctl. */
	new_plane_state->uapi.src_x = src_x;
	new_plane_state->uapi.src_y = src_y;
	new_plane_state->uapi.src_w = src_w;
	new_plane_state->uapi.src_h = src_h;
	new_plane_state->uapi.crtc_x = crtc_x;
	new_plane_state->uapi.crtc_y = crtc_y;
	new_plane_state->uapi.crtc_w = crtc_w;
	new_plane_state->uapi.crtc_h = crtc_h;

	intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);

	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
						  old_plane_state, new_plane_state);
	if (ret)
		goto out_free;

	ret = intel_plane_pin_fb(new_plane_state);
	if (ret)
		goto out_free;

	intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
				ORIGIN_FLIP);
	intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
				to_intel_frontbuffer(new_plane_state->hw.fb),
				plane->frontbuffer_bit);

	/* Swap plane state */
	plane->base.state = &new_plane_state->uapi;

	/*
	 * We cannot swap crtc_state as it may be in use by an atomic commit or
	 * page flip that's running simultaneously. If we swap crtc_state and
	 * destroy the old state, we will cause a use-after-free there.
	 *
	 * Only update active_planes, which is needed for our internal
	 * bookkeeping. Either value will do the right thing when updating
	 * planes atomically. If the cursor was part of the atomic update then
	 * we would have taken the slowpath.
	 */
	crtc_state->active_planes = new_crtc_state->active_planes;

	if (new_plane_state->uapi.visible)
		intel_update_plane(plane, crtc_state, new_plane_state);
	else
		intel_disable_plane(plane, crtc_state);

	intel_plane_unpin_fb(old_plane_state);

out_free:
	/* On success the old plane state is freed, on error the new one. */
	if (new_crtc_state)
		intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi);
	if (ret)
		intel_plane_destroy_state(&plane->base, &new_plane_state->uapi);
	else
		intel_plane_destroy_state(&plane->base, &old_plane_state->uapi);
	return ret;

slow:
	return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
16263
/* drm_plane_funcs vtable for cursor planes; uses the legacy fastpath. */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};
16272
/*
 * Report whether FBC can be used with the given pre-skl plane.
 * The set of FBC-capable planes varies per platform generation,
 * hence the ordered platform checks below.
 */
static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
			       enum i9xx_plane_id i9xx_plane)
{
	if (!HAS_FBC(dev_priv))
		return false;

	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		return i9xx_plane == PLANE_A; /* tied to pipe A */
	else if (IS_IVYBRIDGE(dev_priv))
		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
			i9xx_plane == PLANE_C;
	else if (INTEL_GEN(dev_priv) >= 4)
		return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
	else
		return i9xx_plane == PLANE_A;
}
16289
/*
 * Allocate and register the primary plane for @pipe, selecting the
 * format list, plane vtable hooks and properties appropriate for the
 * platform generation. Gen9+ is handled entirely by
 * skl_universal_plane_create(). Returns the plane or an ERR_PTR.
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *plane;
	const struct drm_plane_funcs *plane_funcs;
	unsigned int supported_rotations;
	const u32 *formats;
	int num_formats;
	int ret, zpos;

	if (INTEL_GEN(dev_priv) >= 9)
		return skl_universal_plane_create(dev_priv, pipe,
						  PLANE_PRIMARY);

	plane = intel_plane_alloc();
	if (IS_ERR(plane))
		return plane;

	plane->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
	else
		plane->i9xx_plane = (enum i9xx_plane_id) pipe;
	plane->id = PLANE_PRIMARY;
	plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);

	plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
	if (plane->has_fbc) {
		struct intel_fbc *fbc = &dev_priv->fbc;

		fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
	}

	/* Pick the pixel format list for this platform. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		formats = vlv_primary_formats;
		num_formats = ARRAY_SIZE(vlv_primary_formats);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		/*
		 * WaFP16GammaEnabling:ivb
		 * "Workaround : When using the 64-bit format, the plane
		 *  output on each color channel has one quarter amplitude.
		 *  It can be brought up to full amplitude by using pipe
		 *  gamma correction or pipe color space conversion to
		 *  multiply the plane output by four."
		 *
		 * There is no dedicated plane gamma for the primary plane,
		 * and using the pipe gamma/csc could conflict with other
		 * planes, so we choose not to expose fp16 on IVB primary
		 * planes. HSW primary planes no longer have this problem.
		 */
		if (IS_IVYBRIDGE(dev_priv)) {
			formats = ivb_primary_formats;
			num_formats = ARRAY_SIZE(ivb_primary_formats);
		} else {
			formats = i965_primary_formats;
			num_formats = ARRAY_SIZE(i965_primary_formats);
		}
	} else {
		formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
	}

	if (INTEL_GEN(dev_priv) >= 4)
		plane_funcs = &i965_plane_funcs;
	else
		plane_funcs = &i8xx_plane_funcs;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		plane->min_cdclk = vlv_plane_min_cdclk;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		plane->min_cdclk = hsw_plane_min_cdclk;
	else if (IS_IVYBRIDGE(dev_priv))
		plane->min_cdclk = ivb_plane_min_cdclk;
	else
		plane->min_cdclk = i9xx_plane_min_cdclk;

	plane->max_stride = i9xx_plane_max_stride;
	plane->update_plane = i9xx_update_plane;
	plane->disable_plane = i9xx_disable_plane;
	plane->get_hw_state = i9xx_plane_get_hw_state;
	plane->check_plane = i9xx_plane_check;

	/* The user-visible plane name differs per generation. */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       0, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       0, plane_funcs,
					       formats, num_formats,
					       i9xx_format_modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c",
					       plane_name(plane->i9xx_plane));
	if (ret)
		goto fail;

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
	} else {
		supported_rotations = DRM_MODE_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&plane->base,
						   DRM_MODE_ROTATE_0,
						   supported_rotations);

	zpos = 0;
	drm_plane_create_zpos_immutable_property(&plane->base, zpos);

	drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);

	return plane;

fail:
	intel_plane_free(plane);

	return ERR_PTR(ret);
}
16422
/*
 * Create and register the cursor plane for @pipe.  845G/865G have their
 * own cursor implementation; all other platforms share the i9xx cursor
 * hooks.  Returns the new plane or an ERR_PTR on failure.
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	struct intel_plane *cursor;
	int ret, zpos;

	cursor = intel_plane_alloc();
	if (IS_ERR(cursor))
		return cursor;

	/* Cursor planes reuse the i9xx plane id of their pipe. */
	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->max_stride = i845_cursor_max_stride;
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->max_stride = i9xx_cursor_max_stride;
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	/*
	 * ~0 sentinels in the cached cursor register values — presumably so
	 * that the first real update never matches the cache and always
	 * writes the hardware registers; confirm against the cursor
	 * update/disable paths.
	 */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       0, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/* Only gen4+ cursors expose rotation (0/180 only). */
	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	/* Fixed z-order: above the primary (zpos 0) and the sprites. */
	zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1;
	drm_plane_create_zpos_immutable_property(&cursor->base, zpos);

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	intel_plane_free(cursor);

	return ERR_PTR(ret);
}
16487
/*
 * drm_crtc_funcs entries common to all the platform variants below; the
 * variants differ only in their vblank counter/enable/disable hooks.
 * See intel_crtc_init() for how a variant is selected per platform.
 */
#define INTEL_CRTC_FUNCS \
	.gamma_set = drm_atomic_helper_legacy_gamma_set, \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources

/* Non-GMCH gen8+ */
static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* Non-GMCH pre-gen8 (ILK..HSW) */
static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* G4X/VLV/CHV */
static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* Remaining gen4 GMCH platforms */
static const struct drm_crtc_funcs i965_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* I915GM/I945GM */
static const struct drm_crtc_funcs i915gm_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i915gm_enable_vblank,
	.disable_vblank = i915gm_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* Remaining gen3 platforms */
static const struct drm_crtc_funcs i915_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};

/* gen2 */
static const struct drm_crtc_funcs i8xx_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	/* no hw vblank counter */
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
16561
16562 static struct intel_crtc *intel_crtc_alloc(void)
16563 {
16564         struct intel_crtc_state *crtc_state;
16565         struct intel_crtc *crtc;
16566
16567         crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
16568         if (!crtc)
16569                 return ERR_PTR(-ENOMEM);
16570
16571         crtc_state = intel_crtc_state_alloc(crtc);
16572         if (!crtc_state) {
16573                 kfree(crtc);
16574                 return ERR_PTR(-ENOMEM);
16575         }
16576
16577         crtc->base.state = &crtc_state->uapi;
16578         crtc->config = crtc_state;
16579
16580         return crtc;
16581 }
16582
/* Undo intel_crtc_alloc(): release the attached state, then the CRTC. */
static void intel_crtc_free(struct intel_crtc *crtc)
{
	intel_crtc_destroy_state(&crtc->base, crtc->base.state);
	kfree(crtc);
}
16588
16589 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
16590 {
16591         struct intel_plane *plane;
16592
16593         for_each_intel_plane(&dev_priv->drm, plane) {
16594                 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
16595                                                                   plane->pipe);
16596
16597                 plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
16598         }
16599 }
16600
/*
 * Allocate and register the CRTC for @pipe together with its primary,
 * sprite and cursor planes, selecting the drm_crtc_funcs variant that
 * matches the platform's vblank hardware.  Returns 0 or a negative
 * error code.
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary, *cursor;
	const struct drm_crtc_funcs *funcs;
	struct intel_crtc *crtc;
	int sprite, ret;

	crtc = intel_crtc_alloc();
	if (IS_ERR(crtc))
		return PTR_ERR(crtc);

	crtc->pipe = pipe;
	crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[pipe];

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	crtc->plane_ids_mask |= BIT(cursor->id);

	/* Pick vblank hooks matching the platform's counter hardware. */
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv) ||
		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
			funcs = &g4x_crtc_funcs;
		else if (IS_GEN(dev_priv, 4))
			funcs = &i965_crtc_funcs;
		else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv))
			funcs = &i915gm_crtc_funcs;
		else if (IS_GEN(dev_priv, 3))
			funcs = &i915_crtc_funcs;
		else
			funcs = &i8xx_crtc_funcs;
	} else {
		if (INTEL_GEN(dev_priv) >= 8)
			funcs = &bdw_crtc_funcs;
		else
			funcs = &ilk_crtc_funcs;
	}

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &crtc->base,
					&primary->base, &cursor->base,
					funcs, "pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/* Each pipe maps to exactly one CRTC; catch double registration. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = crtc;

	/* Pre-gen9 also tracks the legacy plane id -> CRTC mapping. */
	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
	}

	intel_color_init(crtc);

	intel_crtc_crc_init(crtc);

	drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);

	return 0;

fail:
	intel_crtc_free(crtc);

	return ret;
}
16690
16691 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
16692                                       struct drm_file *file)
16693 {
16694         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
16695         struct drm_crtc *drmmode_crtc;
16696         struct intel_crtc *crtc;
16697
16698         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
16699         if (!drmmode_crtc)
16700                 return -ENOENT;
16701
16702         crtc = to_intel_crtc(drmmode_crtc);
16703         pipe_from_crtc_id->pipe = crtc->pipe;
16704
16705         return 0;
16706 }
16707
16708 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
16709 {
16710         struct drm_device *dev = encoder->base.dev;
16711         struct intel_encoder *source_encoder;
16712         u32 possible_clones = 0;
16713
16714         for_each_intel_encoder(dev, source_encoder) {
16715                 if (encoders_cloneable(encoder, source_encoder))
16716                         possible_clones |= drm_encoder_mask(&source_encoder->base);
16717         }
16718
16719         return possible_clones;
16720 }
16721
16722 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
16723 {
16724         struct drm_device *dev = encoder->base.dev;
16725         struct intel_crtc *crtc;
16726         u32 possible_crtcs = 0;
16727
16728         for_each_intel_crtc(dev, crtc) {
16729                 if (encoder->pipe_mask & BIT(crtc->pipe))
16730                         possible_crtcs |= drm_crtc_mask(&crtc->base);
16731         }
16732
16733         return possible_crtcs;
16734 }
16735
16736 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
16737 {
16738         if (!IS_MOBILE(dev_priv))
16739                 return false;
16740
16741         if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
16742                 return false;
16743
16744         if (IS_GEN(dev_priv, 5) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
16745                 return false;
16746
16747         return true;
16748 }
16749
16750 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
16751 {
16752         if (INTEL_GEN(dev_priv) >= 9)
16753                 return false;
16754
16755         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
16756                 return false;
16757
16758         if (HAS_PCH_LPT_H(dev_priv) &&
16759             intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
16760                 return false;
16761
16762         /* DDI E can't be used if DDI A requires 4 lanes */
16763         if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
16764                 return false;
16765
16766         if (!dev_priv->vbt.int_crt_support)
16767                 return false;
16768
16769         return true;
16770 }
16771
16772 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
16773 {
16774         int pps_num;
16775         int pps_idx;
16776
16777         if (HAS_DDI(dev_priv))
16778                 return;
16779         /*
16780          * This w/a is needed at least on CPT/PPT, but to be sure apply it
16781          * everywhere where registers can be write protected.
16782          */
16783         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
16784                 pps_num = 2;
16785         else
16786                 pps_num = 1;
16787
16788         for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
16789                 u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
16790
16791                 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
16792                 intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
16793         }
16794 }
16795
/*
 * Select the panel power sequencer register block for this platform and
 * apply the PPS register-unlock workaround.
 */
static void intel_pps_init(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
		dev_priv->pps_mmio_base = PCH_PPS_BASE;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->pps_mmio_base = VLV_PPS_BASE;
	else
		dev_priv->pps_mmio_base = PPS_BASE;

	intel_pps_unlock_regs_wa(dev_priv);
}
16807
/*
 * Probe and register all display output encoders for this platform
 * (DDI/DP/HDMI/LVDS/CRT/SDVO/DVO/TV/DSI), then fill in every encoder's
 * possible_crtcs/possible_clones masks.  The probing strategy — fixed
 * port lists, strap registers, or VBT — depends on the platform
 * generation, and the registration order within a branch matters (see
 * the LVDS-before-eDP comment below).
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_init(dev_priv);

	if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
		return;

	if (IS_ROCKETLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_D);	/* DDI TC1 */
		intel_ddi_init(dev_priv, PORT_E);	/* DDI TC2 */
	} else if (INTEL_GEN(dev_priv) >= 12) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		intel_ddi_init(dev_priv, PORT_F);
		intel_ddi_init(dev_priv, PORT_G);
		intel_ddi_init(dev_priv, PORT_H);
		intel_ddi_init(dev_priv, PORT_I);
		icl_dsi_init(dev_priv);
	} else if (IS_ELKHARTLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		icl_dsi_init(dev_priv);
	} else if (IS_GEN(dev_priv, 11)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		/*
		 * On some ICL SKUs port F is not present. No strap bits for
		 * this, so rely on VBT.
		 * Work around broken VBTs on SKUs known to have no port F.
		 */
		if (IS_ICL_WITH_PORT_F(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_F))
			intel_ddi_init(dev_priv, PORT_F);

		icl_dsi_init(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		vlv_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
		 * register */
		found = intel_de_read(dev_priv, SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_E))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOB\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				drm_dbg_kms(&dev_priv->drm,
					    "probing HDMI on SDVOC\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
			intel_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (IS_GEN(dev_priv, 2)) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	intel_psr_init(dev_priv);

	/* Now that all encoders exist, compute their CRTC/clone masks. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs =
			intel_encoder_possible_crtcs(encoder);
		encoder->base.possible_clones =
			intel_encoder_possible_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
17053
/*
 * .destroy hook: unregister the fb, drop the frontbuffer tracking
 * reference taken in intel_framebuffer_init(), and free the wrapper.
 */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	intel_frontbuffer_put(intel_fb->frontbuffer);

	kfree(intel_fb);
}
17063
17064 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
17065                                                 struct drm_file *file,
17066                                                 unsigned int *handle)
17067 {
17068         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
17069         struct drm_i915_private *i915 = to_i915(obj->base.dev);
17070
17071         if (obj->userptr.mm) {
17072                 drm_dbg(&i915->drm,
17073                         "attempting to use a userptr for a framebuffer, denied\n");
17074                 return -EINVAL;
17075         }
17076
17077         return drm_gem_handle_create(file, &obj->base, handle);
17078 }
17079
/*
 * .dirty hook: flush any CPU rendering to the backing object and notify
 * the frontbuffer tracking machinery.  The clip rectangles are ignored;
 * the whole framebuffer is flushed.
 */
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	i915_gem_object_flush_if_display(obj);
	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	return 0;
}
17093
/* Framebuffer vtable used for all i915 user-created framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
17099
/*
 * Fill out and validate @intel_fb for the GEM object @obj according to the
 * user supplied addfb2 parameters in @mode_cmd, then register the fb with
 * the DRM core.
 *
 * Takes a frontbuffer tracking reference on @obj; the reference is dropped
 * again on every failure path. Returns 0 on success or a negative errno
 * (-EINVAL for validation failures, -ENOMEM if the frontbuffer can't be
 * allocated, or whatever intel_fill_fb_info()/drm_framebuffer_init() return).
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
                                  struct drm_i915_gem_object *obj,
                                  struct drm_mode_fb_cmd2 *mode_cmd)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct drm_framebuffer *fb = &intel_fb->base;
        u32 max_stride;
        unsigned int tiling, stride;
        int ret = -EINVAL; /* default errno for the validation failures below */
        int i;

        intel_fb->frontbuffer = intel_frontbuffer_get(obj);
        if (!intel_fb->frontbuffer)
                return -ENOMEM;

        /* Snapshot the object's tiling mode and fence stride under its lock. */
        i915_gem_object_lock(obj);
        tiling = i915_gem_object_get_tiling(obj);
        stride = i915_gem_object_get_stride(obj);
        i915_gem_object_unlock(obj);

        if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
                /*
                 * If there's a fence, enforce that
                 * the fb modifier and tiling mode match.
                 */
                if (tiling != I915_TILING_NONE &&
                    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "tiling_mode doesn't match fb modifier\n");
                        goto err;
                }
        } else {
                /* Legacy addfb: derive the modifier from the object's tiling. */
                if (tiling == I915_TILING_X) {
                        mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
                } else if (tiling == I915_TILING_Y) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "No Y tiling for legacy addfb\n");
                        goto err;
                }
        }

        /* The format/modifier combo must be usable by at least one plane. */
        if (!drm_any_plane_has_format(&dev_priv->drm,
                                      mode_cmd->pixel_format,
                                      mode_cmd->modifier[0])) {
                struct drm_format_name_buf format_name;

                drm_dbg_kms(&dev_priv->drm,
                            "unsupported pixel format %s / modifier 0x%llx\n",
                            drm_get_format_name(mode_cmd->pixel_format,
                                                &format_name),
                            mode_cmd->modifier[0]);
                goto err;
        }

        /*
         * gen2/3 display engine uses the fence if present,
         * so the tiling mode must match the fb modifier exactly.
         */
        if (INTEL_GEN(dev_priv) < 4 &&
            tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
                drm_dbg_kms(&dev_priv->drm,
                            "tiling_mode must match fb modifier exactly on gen2/3\n");
                goto err;
        }

        max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
                                         mode_cmd->modifier[0]);
        if (mode_cmd->pitches[0] > max_stride) {
                drm_dbg_kms(&dev_priv->drm,
                            "%s pitch (%u) must be at most %d\n",
                            mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
                            "tiled" : "linear",
                            mode_cmd->pitches[0], max_stride);
                goto err;
        }

        /*
         * If there's a fence, enforce that
         * the fb pitch and fence stride match.
         */
        if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
                drm_dbg_kms(&dev_priv->drm,
                            "pitch (%d) must match tiling stride (%d)\n",
                            mode_cmd->pitches[0], stride);
                goto err;
        }

        /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
        if (mode_cmd->offsets[0] != 0) {
                drm_dbg_kms(&dev_priv->drm,
                            "plane 0 offset (0x%08x) must be 0\n",
                            mode_cmd->offsets[0]);
                goto err;
        }

        drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

        /* Per color plane checks: one object, aligned pitch, CCS aux stride. */
        for (i = 0; i < fb->format->num_planes; i++) {
                u32 stride_alignment;

                /* All color planes must come from the same GEM object. */
                if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
                        drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
                                    i);
                        goto err;
                }

                stride_alignment = intel_fb_stride_alignment(fb, i);
                if (fb->pitches[i] & (stride_alignment - 1)) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "plane %d pitch (%d) must be at least %u byte aligned\n",
                                    i, fb->pitches[i], stride_alignment);
                        goto err;
                }

                /* gen12 CCS aux planes have a fixed, computed pitch. */
                if (is_gen12_ccs_plane(fb, i)) {
                        int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);

                        if (fb->pitches[i] != ccs_aux_stride) {
                                drm_dbg_kms(&dev_priv->drm,
                                            "ccs aux plane %d pitch (%d) must be %d\n",
                                            i,
                                            fb->pitches[i], ccs_aux_stride);
                                goto err;
                        }
                }

                fb->obj[i] = &obj->base;
        }

        ret = intel_fill_fb_info(dev_priv, fb);
        if (ret)
                goto err;

        ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
        if (ret) {
                drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
                goto err;
        }

        return 0;

err:
        /* Drop the frontbuffer reference taken at the top. */
        intel_frontbuffer_put(intel_fb->frontbuffer);
        return ret;
}
17245
17246 static struct drm_framebuffer *
17247 intel_user_framebuffer_create(struct drm_device *dev,
17248                               struct drm_file *filp,
17249                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
17250 {
17251         struct drm_framebuffer *fb;
17252         struct drm_i915_gem_object *obj;
17253         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
17254
17255         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
17256         if (!obj)
17257                 return ERR_PTR(-ENOENT);
17258
17259         fb = intel_framebuffer_create(obj, &mode_cmd);
17260         i915_gem_object_put(obj);
17261
17262         return fb;
17263 }
17264
/*
 * .mode_valid hook: reject modes that no transcoder on this platform can
 * generate, based on flag support and per-generation timing limits.
 */
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
                 const struct drm_display_mode *mode)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        int hdisplay_max, htotal_max;
        int vdisplay_max, vtotal_max;

        /*
         * Can't reject DBLSCAN here because Xorg ddxen can add piles
         * of DBLSCAN modes to the output's mode list when they detect
         * the scaling mode property on the connector. And they don't
         * ask the kernel to validate those modes in any way until
         * modeset time at which point the client gets a protocol error.
         * So in order to not upset those clients we silently ignore the
         * DBLSCAN flag on such connectors. For other connectors we will
         * reject modes with the DBLSCAN flag in encoder->compute_config().
         * And we always reject DBLSCAN modes in connector->mode_valid()
         * as we never want such modes on the connector's mode list.
         */

        if (mode->vscan > 1)
                return MODE_NO_VSCAN;

        if (mode->flags & DRM_MODE_FLAG_HSKEW)
                return MODE_H_ILLEGAL;

        /* Composite sync variants are not supported. */
        if (mode->flags & (DRM_MODE_FLAG_CSYNC |
                           DRM_MODE_FLAG_NCSYNC |
                           DRM_MODE_FLAG_PCSYNC))
                return MODE_HSYNC;

        if (mode->flags & (DRM_MODE_FLAG_BCAST |
                           DRM_MODE_FLAG_PIXMUX |
                           DRM_MODE_FLAG_CLKDIV2))
                return MODE_BAD;

        /* Transcoder timing limits */
        if (INTEL_GEN(dev_priv) >= 11) {
                hdisplay_max = 16384;
                vdisplay_max = 8192;
                htotal_max = 16384;
                vtotal_max = 8192;
        } else if (INTEL_GEN(dev_priv) >= 9 ||
                   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
                hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
                vdisplay_max = 4096;
                htotal_max = 8192;
                vtotal_max = 8192;
        } else if (INTEL_GEN(dev_priv) >= 3) {
                hdisplay_max = 4096;
                vdisplay_max = 4096;
                htotal_max = 8192;
                vtotal_max = 8192;
        } else {
                hdisplay_max = 2048;
                vdisplay_max = 2048;
                htotal_max = 4096;
                vtotal_max = 4096;
        }

        /* hsync_start/end are bounded by htotal_max since they lie within htotal. */
        if (mode->hdisplay > hdisplay_max ||
            mode->hsync_start > htotal_max ||
            mode->hsync_end > htotal_max ||
            mode->htotal > htotal_max)
                return MODE_H_ILLEGAL;

        if (mode->vdisplay > vdisplay_max ||
            mode->vsync_start > vtotal_max ||
            mode->vsync_end > vtotal_max ||
            mode->vtotal > vtotal_max)
                return MODE_V_ILLEGAL;

        /* Minimum active size and blanking requirements differ by gen. */
        if (INTEL_GEN(dev_priv) >= 5) {
                if (mode->hdisplay < 64 ||
                    mode->htotal - mode->hdisplay < 32)
                        return MODE_H_ILLEGAL;

                if (mode->vtotal - mode->vdisplay < 5)
                        return MODE_V_ILLEGAL;
        } else {
                if (mode->htotal - mode->hdisplay < 32)
                        return MODE_H_ILLEGAL;

                if (mode->vtotal - mode->vdisplay < 3)
                        return MODE_V_ILLEGAL;
        }

        return MODE_OK;
}
17355
17356 enum drm_mode_status
17357 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
17358                                 const struct drm_display_mode *mode)
17359 {
17360         int plane_width_max, plane_height_max;
17361
17362         /*
17363          * intel_mode_valid() should be
17364          * sufficient on older platforms.
17365          */
17366         if (INTEL_GEN(dev_priv) < 9)
17367                 return MODE_OK;
17368
17369         /*
17370          * Most people will probably want a fullscreen
17371          * plane so let's not advertize modes that are
17372          * too big for that.
17373          */
17374         if (INTEL_GEN(dev_priv) >= 11) {
17375                 plane_width_max = 5120;
17376                 plane_height_max = 4320;
17377         } else {
17378                 plane_width_max = 5120;
17379                 plane_height_max = 4096;
17380         }
17381
17382         if (mode->hdisplay > plane_width_max)
17383                 return MODE_H_ILLEGAL;
17384
17385         if (mode->vdisplay > plane_height_max)
17386                 return MODE_V_ILLEGAL;
17387
17388         return MODE_OK;
17389 }
17390
/* Top-level KMS mode-config vtable; all commits go through the atomic path. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
        .fb_create = intel_user_framebuffer_create,
        .get_format_info = intel_get_format_info,
        .output_poll_changed = intel_fbdev_output_poll_changed,
        .mode_valid = intel_mode_valid,
        .atomic_check = intel_atomic_check,
        .atomic_commit = intel_atomic_commit,
        .atomic_state_alloc = intel_atomic_state_alloc,
        .atomic_state_clear = intel_atomic_state_clear,
        .atomic_state_free = intel_atomic_state_free,
};
17402
17403 /**
17404  * intel_init_display_hooks - initialize the display modesetting hooks
17405  * @dev_priv: device private
17406  */
17407 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
17408 {
17409         intel_init_cdclk_hooks(dev_priv);
17410
17411         if (INTEL_GEN(dev_priv) >= 9) {
17412                 dev_priv->display.get_pipe_config = hsw_get_pipe_config;
17413                 dev_priv->display.get_initial_plane_config =
17414                         skl_get_initial_plane_config;
17415                 dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
17416                 dev_priv->display.crtc_enable = hsw_crtc_enable;
17417                 dev_priv->display.crtc_disable = hsw_crtc_disable;
17418         } else if (HAS_DDI(dev_priv)) {
17419                 dev_priv->display.get_pipe_config = hsw_get_pipe_config;
17420                 dev_priv->display.get_initial_plane_config =
17421                         i9xx_get_initial_plane_config;
17422                 dev_priv->display.crtc_compute_clock =
17423                         hsw_crtc_compute_clock;
17424                 dev_priv->display.crtc_enable = hsw_crtc_enable;
17425                 dev_priv->display.crtc_disable = hsw_crtc_disable;
17426         } else if (HAS_PCH_SPLIT(dev_priv)) {
17427                 dev_priv->display.get_pipe_config = ilk_get_pipe_config;
17428                 dev_priv->display.get_initial_plane_config =
17429                         i9xx_get_initial_plane_config;
17430                 dev_priv->display.crtc_compute_clock =
17431                         ilk_crtc_compute_clock;
17432                 dev_priv->display.crtc_enable = ilk_crtc_enable;
17433                 dev_priv->display.crtc_disable = ilk_crtc_disable;
17434         } else if (IS_CHERRYVIEW(dev_priv)) {
17435                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
17436                 dev_priv->display.get_initial_plane_config =
17437                         i9xx_get_initial_plane_config;
17438                 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
17439                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
17440                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
17441         } else if (IS_VALLEYVIEW(dev_priv)) {
17442                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
17443                 dev_priv->display.get_initial_plane_config =
17444                         i9xx_get_initial_plane_config;
17445                 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
17446                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
17447                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
17448         } else if (IS_G4X(dev_priv)) {
17449                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
17450                 dev_priv->display.get_initial_plane_config =
17451                         i9xx_get_initial_plane_config;
17452                 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
17453                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
17454                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
17455         } else if (IS_PINEVIEW(dev_priv)) {
17456                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
17457                 dev_priv->display.get_initial_plane_config =
17458                         i9xx_get_initial_plane_config;
17459                 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
17460                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
17461                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
17462         } else if (!IS_GEN(dev_priv, 2)) {
17463                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
17464                 dev_priv->display.get_initial_plane_config =
17465                         i9xx_get_initial_plane_config;
17466                 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
17467                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
17468                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
17469         } else {
17470                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
17471                 dev_priv->display.get_initial_plane_config =
17472                         i9xx_get_initial_plane_config;
17473                 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
17474                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
17475                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
17476         }
17477
17478         if (IS_GEN(dev_priv, 5)) {
17479                 dev_priv->display.fdi_link_train = ilk_fdi_link_train;
17480         } else if (IS_GEN(dev_priv, 6)) {
17481                 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
17482         } else if (IS_IVYBRIDGE(dev_priv)) {
17483                 /* FIXME: detect B0+ stepping and use auto training */
17484                 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
17485         }
17486
17487         if (INTEL_GEN(dev_priv) >= 9)
17488                 dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
17489         else
17490                 dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
17491
17492 }
17493
17494 void intel_modeset_init_hw(struct drm_i915_private *i915)
17495 {
17496         struct intel_cdclk_state *cdclk_state =
17497                 to_intel_cdclk_state(i915->cdclk.obj.state);
17498         struct intel_dbuf_state *dbuf_state =
17499                 to_intel_dbuf_state(i915->dbuf.obj.state);
17500
17501         intel_update_cdclk(i915);
17502         intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
17503         cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
17504
17505         dbuf_state->enabled_slices = i915->dbuf.enabled_slices;
17506 }
17507
17508 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
17509 {
17510         struct drm_plane *plane;
17511         struct intel_crtc *crtc;
17512
17513         for_each_intel_crtc(state->dev, crtc) {
17514                 struct intel_crtc_state *crtc_state;
17515
17516                 crtc_state = intel_atomic_get_crtc_state(state, crtc);
17517                 if (IS_ERR(crtc_state))
17518                         return PTR_ERR(crtc_state);
17519
17520                 if (crtc_state->hw.active) {
17521                         /*
17522                          * Preserve the inherited flag to avoid
17523                          * taking the full modeset path.
17524                          */
17525                         crtc_state->inherited = true;
17526                 }
17527         }
17528
17529         drm_for_each_plane(plane, state->dev) {
17530                 struct drm_plane_state *plane_state;
17531
17532                 plane_state = drm_atomic_get_plane_state(state, plane);
17533                 if (IS_ERR(plane_state))
17534                         return PTR_ERR(plane_state);
17535         }
17536
17537         return 0;
17538 }
17539
17540 /*
17541  * Calculate what we think the watermarks should be for the state we've read
17542  * out of the hardware and then immediately program those watermarks so that
17543  * we ensure the hardware settings match our internal state.
17544  *
17545  * We can calculate what we think WM's should be by creating a duplicate of the
17546  * current state (which was constructed during hardware readout) and running it
17547  * through the atomic check code to calculate new watermark values in the
17548  * state object.
17549  */
17550 static void sanitize_watermarks(struct drm_i915_private *dev_priv)
17551 {
17552         struct drm_atomic_state *state;
17553         struct intel_atomic_state *intel_state;
17554         struct intel_crtc *crtc;
17555         struct intel_crtc_state *crtc_state;
17556         struct drm_modeset_acquire_ctx ctx;
17557         int ret;
17558         int i;
17559
17560         /* Only supported on platforms that use atomic watermark design */
17561         if (!dev_priv->display.optimize_watermarks)
17562                 return;
17563
17564         state = drm_atomic_state_alloc(&dev_priv->drm);
17565         if (drm_WARN_ON(&dev_priv->drm, !state))
17566                 return;
17567
17568         intel_state = to_intel_atomic_state(state);
17569
17570         drm_modeset_acquire_init(&ctx, 0);
17571
17572 retry:
17573         state->acquire_ctx = &ctx;
17574
17575         /*
17576          * Hardware readout is the only time we don't want to calculate
17577          * intermediate watermarks (since we don't trust the current
17578          * watermarks).
17579          */
17580         if (!HAS_GMCH(dev_priv))
17581                 intel_state->skip_intermediate_wm = true;
17582
17583         ret = sanitize_watermarks_add_affected(state);
17584         if (ret)
17585                 goto fail;
17586
17587         ret = intel_atomic_check(&dev_priv->drm, state);
17588         if (ret)
17589                 goto fail;
17590
17591         /* Write calculated watermark values back */
17592         for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
17593                 crtc_state->wm.need_postvbl_update = true;
17594                 dev_priv->display.optimize_watermarks(intel_state, crtc);
17595
17596                 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
17597         }
17598
17599 fail:
17600         if (ret == -EDEADLK) {
17601                 drm_atomic_state_clear(state);
17602                 drm_modeset_backoff(&ctx);
17603                 goto retry;
17604         }
17605
17606         /*
17607          * If we fail here, it means that the hardware appears to be
17608          * programmed in a way that shouldn't be possible, given our
17609          * understanding of watermark requirements.  This might mean a
17610          * mistake in the hardware readout code or a mistake in the
17611          * watermark calculations for a given platform.  Raise a WARN
17612          * so that this is noticeable.
17613          *
17614          * If this actually happens, we'll have to just leave the
17615          * BIOS-programmed watermarks untouched and hope for the best.
17616          */
17617         drm_WARN(&dev_priv->drm, ret,
17618                  "Could not determine valid watermarks for inherited state\n");
17619
17620         drm_atomic_state_put(state);
17621
17622         drm_modeset_drop_locks(&ctx);
17623         drm_modeset_acquire_fini(&ctx);
17624 }
17625
17626 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
17627 {
17628         if (IS_GEN(dev_priv, 5)) {
17629                 u32 fdi_pll_clk =
17630                         intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
17631
17632                 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
17633         } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
17634                 dev_priv->fdi_pll_freq = 270000;
17635         } else {
17636                 return;
17637         }
17638
17639         drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
17640 }
17641
/*
 * Commit the state inherited from the BIOS once at driver load, so all
 * active planes recompute their software state without forcing a full
 * modeset. Returns 0 on success or a negative errno.
 */
static int intel_initial_commit(struct drm_device *dev)
{
        struct drm_atomic_state *state = NULL;
        struct drm_modeset_acquire_ctx ctx;
        struct intel_crtc *crtc;
        int ret = 0;

        state = drm_atomic_state_alloc(dev);
        if (!state)
                return -ENOMEM;

        drm_modeset_acquire_init(&ctx, 0);

/* Re-entered after drm_modeset_backoff() on -EDEADLK below. */
retry:
        state->acquire_ctx = &ctx;

        for_each_intel_crtc(dev, crtc) {
                struct intel_crtc_state *crtc_state =
                        intel_atomic_get_crtc_state(state, crtc);

                if (IS_ERR(crtc_state)) {
                        ret = PTR_ERR(crtc_state);
                        goto out;
                }

                if (crtc_state->hw.active) {
                        /*
                         * We've not yet detected sink capabilities
                         * (audio,infoframes,etc.) and thus we don't want to
                         * force a full state recomputation yet. We want that to
                         * happen only for the first real commit from userspace.
                         * So preserve the inherited flag for the time being.
                         */
                        crtc_state->inherited = true;

                        ret = drm_atomic_add_affected_planes(state, &crtc->base);
                        if (ret)
                                goto out;

                        /*
                         * FIXME hack to force a LUT update to avoid the
                         * plane update forcing the pipe gamma on without
                         * having a proper LUT loaded. Remove once we
                         * have readout for pipe gamma enable.
                         */
                        crtc_state->uapi.color_mgmt_changed = true;

                        /*
                         * FIXME hack to force full modeset when DSC is being
                         * used.
                         *
                         * As long as we do not have full state readout and
                         * config comparison of crtc_state->dsc, we have no way
                         * to ensure reliable fastset. Remove once we have
                         * readout for DSC.
                         */
                        if (crtc_state->dsc.compression_enable) {
                                ret = drm_atomic_add_affected_connectors(state,
                                                                         &crtc->base);
                                if (ret)
                                        goto out;
                                crtc_state->uapi.mode_changed = true;
                                drm_dbg_kms(dev, "Force full modeset for DSC\n");
                        }
                }
        }

        ret = drm_atomic_commit(state);

out:
        if (ret == -EDEADLK) {
                /* Standard atomic deadlock dance: clear, back off, retry. */
                drm_atomic_state_clear(state);
                drm_modeset_backoff(&ctx);
                goto retry;
        }

        drm_atomic_state_put(state);

        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);

        return ret;
}
17725
17726 static void intel_mode_config_init(struct drm_i915_private *i915)
17727 {
17728         struct drm_mode_config *mode_config = &i915->drm.mode_config;
17729
17730         drm_mode_config_init(&i915->drm);
17731         INIT_LIST_HEAD(&i915->global_obj_list);
17732
17733         mode_config->min_width = 0;
17734         mode_config->min_height = 0;
17735
17736         mode_config->preferred_depth = 24;
17737         mode_config->prefer_shadow = 1;
17738
17739         mode_config->allow_fb_modifiers = true;
17740
17741         mode_config->funcs = &intel_mode_funcs;
17742
17743         /*
17744          * Maximum framebuffer dimensions, chosen to match
17745          * the maximum render engine surface size on gen4+.
17746          */
17747         if (INTEL_GEN(i915) >= 7) {
17748                 mode_config->max_width = 16384;
17749                 mode_config->max_height = 16384;
17750         } else if (INTEL_GEN(i915) >= 4) {
17751                 mode_config->max_width = 8192;
17752                 mode_config->max_height = 8192;
17753         } else if (IS_GEN(i915, 3)) {
17754                 mode_config->max_width = 4096;
17755                 mode_config->max_height = 4096;
17756         } else {
17757                 mode_config->max_width = 2048;
17758                 mode_config->max_height = 2048;
17759         }
17760
17761         if (IS_I845G(i915) || IS_I865G(i915)) {
17762                 mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
17763                 mode_config->cursor_height = 1023;
17764         } else if (IS_I830(i915) || IS_I85X(i915) ||
17765                    IS_I915G(i915) || IS_I915GM(i915)) {
17766                 mode_config->cursor_width = 64;
17767                 mode_config->cursor_height = 64;
17768         } else {
17769                 mode_config->cursor_width = 256;
17770                 mode_config->cursor_height = 256;
17771         }
17772 }
17773
/*
 * Counterpart to intel_mode_config_init(): release the global atomic
 * state objects before tearing down the DRM mode config.
 */
static void intel_mode_config_cleanup(struct drm_i915_private *i915)
{
        intel_atomic_global_obj_cleanup(i915);
        drm_mode_config_cleanup(&i915->drm);
}
17779
17780 static void plane_config_fini(struct intel_initial_plane_config *plane_config)
17781 {
17782         if (plane_config->fb) {
17783                 struct drm_framebuffer *fb = &plane_config->fb->base;
17784
17785                 /* We may only have the stub and not a full framebuffer */
17786                 if (drm_framebuffer_read_refcount(fb))
17787                         drm_framebuffer_put(fb);
17788                 else
17789                         kfree(fb);
17790         }
17791
17792         if (plane_config->vma)
17793                 i915_vma_put(plane_config->vma);
17794 }
17795
17796 /* part #1: call before irq install */
17797 int intel_modeset_init_noirq(struct drm_i915_private *i915)
17798 {
17799         int ret;
17800
17801         i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
17802         i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
17803                                         WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
17804
17805         intel_mode_config_init(i915);
17806
17807         ret = intel_cdclk_init(i915);
17808         if (ret)
17809                 return ret;
17810
17811         ret = intel_dbuf_init(i915);
17812         if (ret)
17813                 return ret;
17814
17815         ret = intel_bw_init(i915);
17816         if (ret)
17817                 return ret;
17818
17819         init_llist_head(&i915->atomic_helper.free_list);
17820         INIT_WORK(&i915->atomic_helper.free_work,
17821                   intel_atomic_helper_free_state_worker);
17822
17823         intel_init_quirks(i915);
17824
17825         intel_fbc_init(i915);
17826
17827         return 0;
17828 }
17829
/* part #2: call after irq install */
int intel_modeset_init(struct drm_i915_private *i915)
{
        struct drm_device *dev = &i915->drm;
        enum pipe pipe;
        struct intel_crtc *crtc;
        int ret;

        intel_init_pm(i915);

        intel_panel_sanitize_ssc(i915);

        intel_gmbus_setup(i915);

        drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
                    INTEL_NUM_PIPES(i915),
                    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");

        /* Only register crtcs when the display is present and enabled. */
        if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
                for_each_pipe(i915, pipe) {
                        ret = intel_crtc_init(i915, pipe);
                        if (ret) {
                                intel_mode_config_cleanup(i915);
                                return ret;
                        }
                }
        }

        intel_plane_possible_crtcs_init(i915);
        intel_shared_dpll_init(dev);
        intel_update_fdi_pll_freq(i915);

        intel_update_czclk(i915);
        intel_modeset_init_hw(i915);

        intel_hdcp_component_init(i915);

        if (i915->max_cdclk_freq == 0)
                intel_update_max_cdclk(i915);

        /* Just disable it once at startup */
        intel_vga_disable(i915);
        intel_setup_outputs(i915);

        /* Read the BIOS-programmed hardware state into our software state. */
        drm_modeset_lock_all(dev);
        intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
        drm_modeset_unlock_all(dev);

        for_each_intel_crtc(dev, crtc) {
                struct intel_initial_plane_config plane_config = {};

                if (!crtc->active)
                        continue;

                /*
                 * Note that reserving the BIOS fb up front prevents us
                 * from stuffing other stolen allocations like the ring
                 * on top.  This prevents some ugliness at boot time, and
                 * can even allow for smooth boot transitions if the BIOS
                 * fb is large enough for the active pipe configuration.
                 */
                i915->display.get_initial_plane_config(crtc, &plane_config);

                /*
                 * If the fb is shared between multiple heads, we'll
                 * just get the first one.
                 */
                intel_find_initial_plane_obj(crtc, &plane_config);

                plane_config_fini(&plane_config);
        }

        /*
         * Make sure hardware watermarks really match the state we read out.
         * Note that we need to do this after reconstructing the BIOS fb's
         * since the watermark calculation done here will use pstate->fb.
         */
        if (!HAS_GMCH(i915))
                sanitize_watermarks(i915);

        /*
         * Force all active planes to recompute their states. So that on
         * mode_setcrtc after probe, all the intel_plane_state variables
         * are already calculated and there is no assert_plane warnings
         * during bootup.
         */
        ret = intel_initial_commit(dev);
        if (ret)
                drm_dbg_kms(&i915->drm, "Initial commit in probe failed.\n");

        /* A failed initial commit is deliberately non-fatal: only logged. */
        return 0;
}
17922
/*
 * Force-enable pipe @pipe with fixed 640x480@60Hz timings for the i830
 * "force pipe on" quirk. Programs the DPLL and pipe timing registers
 * directly, following the required enable sequence (VGA-mode toggle for
 * the dividers, repeated DPLL rewrites for clock warmup).
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
        /* 640x480@60Hz, ~25175 kHz */
        struct dpll clock = {
                .m1 = 18,
                .m2 = 7,
                .p1 = 13,
                .p2 = 4,
                .n = 2,
        };
        u32 dpll, fp;
        int i;

        /* Sanity check: 48 MHz ref with these dividers must give ~25.154 MHz */
        drm_WARN_ON(&dev_priv->drm,
                    i9xx_calc_dpll_params(48000, &clock) != 25154);

        drm_dbg_kms(&dev_priv->drm,
                    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
                    pipe_name(pipe), clock.vco, clock.dot);

        fp = i9xx_dpll_compute_fp(&clock);
        dpll = DPLL_DVO_2X_MODE |
                DPLL_VGA_MODE_DIS |
                ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
                PLL_P2_DIVIDE_BY_4 |
                PLL_REF_INPUT_DREFCLK |
                DPLL_VCO_ENABLE;

        intel_de_write(dev_priv, FP0(pipe), fp);
        intel_de_write(dev_priv, FP1(pipe), fp);

        /* Fixed 640x480@60Hz mode timings (values are end-exclusive minus one) */
        intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
        intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
        intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
        intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
        intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
        intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
        intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

        /*
         * Apparently we need to have VGA mode enabled prior to changing
         * the P1/P2 dividers. Otherwise the DPLL will keep using the old
         * dividers, even though the register value does change.
         */
        intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
        intel_de_write(dev_priv, DPLL(pipe), dpll);

        /* Wait for the clocks to stabilize. */
        intel_de_posting_read(dev_priv, DPLL(pipe));
        udelay(150);

        /* The pixel multiplier can only be updated once the
         * DPLL is enabled and the clocks are stable.
         *
         * So write it again.
         */
        intel_de_write(dev_priv, DPLL(pipe), dpll);

        /* We do this three times for luck */
        for (i = 0; i < 3 ; i++) {
                intel_de_write(dev_priv, DPLL(pipe), dpll);
                intel_de_posting_read(dev_priv, DPLL(pipe));
                udelay(150); /* wait for warmup */
        }

        intel_de_write(dev_priv, PIPECONF(pipe),
                       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
        intel_de_posting_read(dev_priv, PIPECONF(pipe));

        /* Wait until the pipe actually starts scanning out. */
        intel_wait_for_pipe_scanline_moving(crtc);
}
17995
/*
 * Force-disable pipe @pipe — the counterpart of i830_enable_pipe() for
 * the i830 force quirk. The WARNs verify that no display plane or
 * cursor is still enabled before the pipe is shut off.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

        drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
                    pipe_name(pipe));

        /* All planes and cursors must be off before disabling the pipe. */
        drm_WARN_ON(&dev_priv->drm,
                    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
                    DISPLAY_PLANE_ENABLE);
        drm_WARN_ON(&dev_priv->drm,
                    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
                    DISPLAY_PLANE_ENABLE);
        drm_WARN_ON(&dev_priv->drm,
                    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
                    DISPLAY_PLANE_ENABLE);
        drm_WARN_ON(&dev_priv->drm,
                    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
        drm_WARN_ON(&dev_priv->drm,
                    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);

        intel_de_write(dev_priv, PIPECONF(pipe), 0);
        intel_de_posting_read(dev_priv, PIPECONF(pipe));

        /* Wait for scanout to actually stop before touching the DPLL. */
        intel_wait_for_pipe_scanline_stopped(crtc);

        intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
        intel_de_posting_read(dev_priv, DPLL(pipe));
}
18025
/*
 * On pre-gen4 hardware the primary planes can be attached to either
 * pipe. Disable any primary plane the BIOS left attached to a pipe
 * other than the one owned by its crtc, so the driver's plane<->pipe
 * mapping assumptions hold.
 */
static void
intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
{
        struct intel_crtc *crtc;

        /* Gen4+ planes have a fixed pipe assignment; nothing to fix up. */
        if (INTEL_GEN(dev_priv) >= 4)
                return;

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_plane *plane =
                        to_intel_plane(crtc->base.primary);
                struct intel_crtc *plane_crtc;
                enum pipe pipe;

                /* Skip planes that aren't enabled at all. */
                if (!plane->get_hw_state(plane, &pipe))
                        continue;

                /* Plane already on the pipe we expect — nothing to do. */
                if (pipe == crtc->pipe)
                        continue;

                drm_dbg_kms(&dev_priv->drm,
                            "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
                            plane->base.base.id, plane->base.name);

                /* Disable relative to the pipe the hardware actually uses. */
                plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
                intel_plane_disable_noatomic(plane_crtc, plane);
        }
}
18054
18055 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
18056 {
18057         struct drm_device *dev = crtc->base.dev;
18058         struct intel_encoder *encoder;
18059
18060         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
18061                 return true;
18062
18063         return false;
18064 }
18065
18066 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
18067 {
18068         struct drm_device *dev = encoder->base.dev;
18069         struct intel_connector *connector;
18070
18071         for_each_connector_on_encoder(dev, &encoder->base, connector)
18072                 return connector;
18073
18074         return NULL;
18075 }
18076
18077 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
18078                               enum pipe pch_transcoder)
18079 {
18080         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
18081                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
18082 }
18083
/*
 * Reset the frame start delay to 0 in the CPU transcoder (and, for PCH
 * encoders, the PCH transcoder) registers. The BIOS may have left a
 * non-zero delay programmed for debugging; force it back to the value
 * the driver expects.
 */
static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        if (INTEL_GEN(dev_priv) >= 9 ||
            IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
                /* HSW+ keep the delay in the per-transcoder chicken register. */
                i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
                u32 val;

                /* DSI transcoders have no CHICKEN_TRANS register. */
                if (transcoder_is_dsi(cpu_transcoder))
                        return;

                val = intel_de_read(dev_priv, reg);
                val &= ~HSW_FRAME_START_DELAY_MASK;
                val |= HSW_FRAME_START_DELAY(0);
                intel_de_write(dev_priv, reg, val);
        } else {
                /* Older platforms keep the delay in PIPECONF. */
                i915_reg_t reg = PIPECONF(cpu_transcoder);
                u32 val;

                val = intel_de_read(dev_priv, reg);
                val &= ~PIPECONF_FRAME_START_DELAY_MASK;
                val |= PIPECONF_FRAME_START_DELAY(0);
                intel_de_write(dev_priv, reg, val);
        }

        if (!crtc_state->has_pch_encoder)
                return;

        if (HAS_PCH_IBX(dev_priv)) {
                /* IBX: delay lives in the PCH transcoder config register. */
                i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
                u32 val;

                val = intel_de_read(dev_priv, reg);
                val &= ~TRANS_FRAME_START_DELAY_MASK;
                val |= TRANS_FRAME_START_DELAY(0);
                intel_de_write(dev_priv, reg, val);
        } else {
                /* CPT+: delay lives in the transcoder chicken register. */
                enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
                i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
                u32 val;

                val = intel_de_read(dev_priv, reg);
                val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
                val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
                intel_de_write(dev_priv, reg, val);
        }
}
18134
/*
 * Sanitize @crtc's state after hardware readout: clear any BIOS frame
 * start delays, disable all non-primary planes, reset the gen9+ pipe
 * background color, turn the pipe off if it has no encoders attached,
 * and mark FIFO underrun reporting as disabled for correct bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
                                struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);

        if (crtc_state->hw.active) {
                struct intel_plane *plane;

                /* Clear any frame start delays used for debugging left by the BIOS */
                intel_sanitize_frame_start_delay(crtc_state);

                /* Disable everything but the primary plane */
                for_each_intel_plane_on_crtc(dev, crtc, plane) {
                        const struct intel_plane_state *plane_state =
                                to_intel_plane_state(plane->base.state);

                        if (plane_state->uapi.visible &&
                            plane->base.type != DRM_PLANE_TYPE_PRIMARY)
                                intel_plane_disable_noatomic(crtc, plane);
                }

                /*
                 * Disable any background color set by the BIOS, but enable the
                 * gamma and CSC to match how we program our planes.
                 */
                if (INTEL_GEN(dev_priv) >= 9)
                        intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
                                       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
        }

        /* Adjust the state of the output pipe according to whether we
         * have active connectors/encoders. */
        if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc))
                intel_crtc_disable_noatomic(crtc, ctx);

        if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
                /*
                 * We start out with underrun reporting disabled to avoid races.
                 * For correct bookkeeping mark this on active crtcs.
                 *
                 * Also on gmch platforms we dont have any hardware bits to
                 * disable the underrun reporting. Which means we need to start
                 * out with underrun reporting disabled also on inactive pipes,
                 * since otherwise we'll complain about the garbage we read when
                 * e.g. coming up after runtime pm.
                 *
                 * No protection against concurrent access is required - at
                 * worst a fifo underrun happens which also sets this to false.
                 */
                crtc->cpu_fifo_underrun_disabled = true;
                /*
                 * We track the PCH trancoder underrun reporting state
                 * within the crtc. With crtc for pipe A housing the underrun
                 * reporting state for PCH transcoder A, crtc for pipe B housing
                 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
                 * and marking underrun reporting as disabled for the non-existing
                 * PCH transcoders B and C would prevent enabling the south
                 * error interrupt (see cpt_can_enable_serr_int()).
                 */
                if (has_pch_trancoder(dev_priv, crtc->pipe))
                        crtc->pch_fifo_underrun_disabled = true;
        }
}
18200
18201 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
18202 {
18203         struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
18204
18205         /*
18206          * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
18207          * the hardware when a high res displays plugged in. DPLL P
18208          * divider is zero, and the pipe timings are bonkers. We'll
18209          * try to disable everything in that case.
18210          *
18211          * FIXME would be nice to be able to sanitize this state
18212          * without several WARNs, but for now let's take the easy
18213          * road.
18214          */
18215         return IS_GEN(dev_priv, 6) &&
18216                 crtc_state->hw.active &&
18217                 crtc_state->shared_dpll &&
18218                 crtc_state->port_clock == 0;
18219 }
18220
/*
 * Sanitize @encoder's state after hardware readout. If the encoder has
 * active connectors but no active pipe (e.g. fallout from resume
 * register restore, or a misprogrammed SNB BIOS DPLL), manually run the
 * encoder's disable hooks and clamp the connector/encoder links to off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_connector *connector;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_crtc_state *crtc_state = crtc ?
                to_intel_crtc_state(crtc->base.state) : NULL;

        /* We need to check both for a crtc link (meaning that the
         * encoder is active and trying to read from a pipe) and the
         * pipe itself being active. */
        bool has_active_crtc = crtc_state &&
                crtc_state->hw.active;

        /* Treat a bogus SNB BIOS DPLL config as if the pipe were inactive. */
        if (crtc_state && has_bogus_dpll_config(crtc_state)) {
                drm_dbg_kms(&dev_priv->drm,
                            "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
                            pipe_name(crtc->pipe));
                has_active_crtc = false;
        }

        connector = intel_encoder_find_connector(encoder);
        if (connector && !has_active_crtc) {
                drm_dbg_kms(&dev_priv->drm,
                            "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
                            encoder->base.base.id,
                            encoder->base.name);

                /* Connector is active, but has no active pipe. This is
                 * fallout from our resume register restoring. Disable
                 * the encoder manually again. */
                if (crtc_state) {
                        struct drm_encoder *best_encoder;

                        drm_dbg_kms(&dev_priv->drm,
                                    "[ENCODER:%d:%s] manually disabled\n",
                                    encoder->base.base.id,
                                    encoder->base.name);

                        /* avoid oopsing in case the hooks consult best_encoder */
                        best_encoder = connector->base.state->best_encoder;
                        connector->base.state->best_encoder = &encoder->base;

                        /* FIXME NULL atomic state passed! */
                        if (encoder->disable)
                                encoder->disable(NULL, encoder, crtc_state,
                                                 connector->base.state);
                        if (encoder->post_disable)
                                encoder->post_disable(NULL, encoder, crtc_state,
                                                      connector->base.state);

                        /* Restore the original best_encoder after the hooks ran. */
                        connector->base.state->best_encoder = best_encoder;
                }
                encoder->base.crtc = NULL;

                /* Inconsistent output/port/pipe state happens presumably due to
                 * a bug in one of the get_hw_state functions. Or someplace else
                 * in our code, like the register restore mess on resume. Clamp
                 * things to off as a safer default. */

                connector->base.dpms = DRM_MODE_DPMS_OFF;
                connector->base.encoder = NULL;
        }

        /* notify opregion of the sanitized encoder state */
        intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_sanitize_encoder_pll_mapping(encoder);
}
18291
/* FIXME read out full plane state for all planes */
/*
 * Read back each plane's enable state and owning pipe from the
 * hardware, propagate the visibility into the corresponding crtc
 * state, then fix up the per-crtc active plane bookkeeping.
 */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
        struct intel_plane *plane;
        struct intel_crtc *crtc;

        for_each_intel_plane(&dev_priv->drm, plane) {
                struct intel_plane_state *plane_state =
                        to_intel_plane_state(plane->base.state);
                struct intel_crtc_state *crtc_state;
                /* Default pipe in case get_hw_state() reports the plane off. */
                enum pipe pipe = PIPE_A;
                bool visible;

                visible = plane->get_hw_state(plane, &pipe);

                crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
                crtc_state = to_intel_crtc_state(crtc->base.state);

                intel_set_plane_visible(crtc_state, plane_state, visible);

                drm_dbg_kms(&dev_priv->drm,
                            "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
                            plane->base.base.id, plane->base.name,
                            enableddisabled(visible), pipe_name(pipe));
        }

        /* Reconcile each crtc's active-planes bookkeeping with the readout. */
        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);

                fixup_active_planes(crtc_state);
        }
}
18325
/*
 * Read the current modeset state out of the hardware into the software
 * state structures: crtcs, planes, dplls, encoders and connectors, plus
 * the derived cdclk/dbuf/bw state. No prior software state is trusted;
 * each crtc state is reset and rebuilt from the registers.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_cdclk_state *cdclk_state =
                to_intel_cdclk_state(dev_priv->cdclk.obj.state);
        struct intel_dbuf_state *dbuf_state =
                to_intel_dbuf_state(dev_priv->dbuf.obj.state);
        enum pipe pipe;
        struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        struct intel_connector *connector;
        struct drm_connector_list_iter conn_iter;
        u8 active_pipes = 0;

        /* CRTCs: reset the state and read back enable/active from hardware. */
        for_each_intel_crtc(dev, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);

                __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
                intel_crtc_free_hw_state(crtc_state);
                intel_crtc_state_reset(crtc_state, crtc);

                crtc_state->hw.active = crtc_state->hw.enable =
                        dev_priv->display.get_pipe_config(crtc, crtc_state);

                crtc->base.enabled = crtc_state->hw.enable;
                crtc->active = crtc_state->hw.active;

                if (crtc_state->hw.active)
                        active_pipes |= BIT(crtc->pipe);

                drm_dbg_kms(&dev_priv->drm,
                            "[CRTC:%d:%s] hw state readout: %s\n",
                            crtc->base.base.id, crtc->base.name,
                            enableddisabled(crtc_state->hw.active));
        }

        /* Mirror the active pipe mask into the cdclk and dbuf state too. */
        dev_priv->active_pipes = cdclk_state->active_pipes =
                dbuf_state->active_pipes = active_pipes;

        readout_plane_state(dev_priv);

        intel_dpll_readout_hw_state(dev_priv);

        /* Encoders: link each active encoder to the crtc driving it. */
        for_each_intel_encoder(dev, encoder) {
                pipe = 0;

                if (encoder->get_hw_state(encoder, &pipe)) {
                        struct intel_crtc_state *crtc_state;

                        crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
                        crtc_state = to_intel_crtc_state(crtc->base.state);

                        encoder->base.crtc = &crtc->base;
                        encoder->get_config(encoder, crtc_state);
                } else {
                        encoder->base.crtc = NULL;
                }

                drm_dbg_kms(&dev_priv->drm,
                            "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
                            encoder->base.base.id, encoder->base.name,
                            enableddisabled(encoder->base.crtc),
                            pipe_name(pipe));
        }

        /* Connectors: dpms state plus connector/encoder masks on the crtc. */
        drm_connector_list_iter_begin(dev, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                if (connector->get_hw_state(connector)) {
                        struct intel_crtc_state *crtc_state;
                        struct intel_crtc *crtc;

                        connector->base.dpms = DRM_MODE_DPMS_ON;

                        encoder = intel_attached_encoder(connector);
                        connector->base.encoder = &encoder->base;

                        crtc = to_intel_crtc(encoder->base.crtc);
                        crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

                        if (crtc_state && crtc_state->hw.active) {
                                /*
                                 * This has to be done during hardware readout
                                 * because anything calling .crtc_disable may
                                 * rely on the connector_mask being accurate.
                                 */
                                crtc_state->uapi.connector_mask |=
                                        drm_connector_mask(&connector->base);
                                crtc_state->uapi.encoder_mask |=
                                        drm_encoder_mask(&encoder->base);
                        }
                } else {
                        connector->base.dpms = DRM_MODE_DPMS_OFF;
                        connector->base.encoder = NULL;
                }
                drm_dbg_kms(&dev_priv->drm,
                            "[CONNECTOR:%d:%s] hw state readout: %s\n",
                            connector->base.base.id, connector->base.name,
                            enableddisabled(connector->base.encoder));
        }
        drm_connector_list_iter_end(&conn_iter);

        /* Derive the remaining per-crtc state: mode, pixel rate, cdclk, bw. */
        for_each_intel_crtc(dev, crtc) {
                struct intel_bw_state *bw_state =
                        to_intel_bw_state(dev_priv->bw_obj.state);
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);
                struct intel_plane *plane;
                int min_cdclk = 0;

                if (crtc_state->hw.active) {
                        struct drm_display_mode *mode = &crtc_state->hw.mode;

                        intel_mode_from_pipe_config(&crtc_state->hw.adjusted_mode,
                                                    crtc_state);

                        *mode = crtc_state->hw.adjusted_mode;
                        mode->hdisplay = crtc_state->pipe_src_w;
                        mode->vdisplay = crtc_state->pipe_src_h;

                        /*
                         * The initial mode needs to be set in order to keep
                         * the atomic core happy. It wants a valid mode if the
                         * crtc's enabled, so we do the above call.
                         *
                         * But we don't set all the derived state fully, hence
                         * set a flag to indicate that a full recalculation is
                         * needed on the next commit.
                         */
                        crtc_state->inherited = true;

                        intel_crtc_compute_pixel_rate(crtc_state);

                        intel_crtc_update_active_timings(crtc_state);

                        intel_crtc_copy_hw_to_uapi_state(crtc_state);
                }

                for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
                        const struct intel_plane_state *plane_state =
                                to_intel_plane_state(plane->base.state);

                        /*
                         * FIXME don't have the fb yet, so can't
                         * use intel_plane_data_rate() :(
                         */
                        if (plane_state->uapi.visible)
                                crtc_state->data_rate[plane->id] =
                                        4 * crtc_state->pixel_rate;
                        /*
                         * FIXME don't have the fb yet, so can't
                         * use plane->min_cdclk() :(
                         */
                        if (plane_state->uapi.visible && plane->min_cdclk) {
                                if (crtc_state->double_wide ||
                                    INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
                                        crtc_state->min_cdclk[plane->id] =
                                                DIV_ROUND_UP(crtc_state->pixel_rate, 2);
                                else
                                        crtc_state->min_cdclk[plane->id] =
                                                crtc_state->pixel_rate;
                        }
                        drm_dbg_kms(&dev_priv->drm,
                                    "[PLANE:%d:%s] min_cdclk %d kHz\n",
                                    plane->base.base.id, plane->base.name,
                                    crtc_state->min_cdclk[plane->id]);
                }

                if (crtc_state->hw.active) {
                        min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
                        if (drm_WARN_ON(dev, min_cdclk < 0))
                                min_cdclk = 0;
                }

                cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
                cdclk_state->min_voltage_level[crtc->pipe] =
                        crtc_state->min_voltage_level;

                intel_bw_crtc_update(bw_state, crtc_state);

                intel_pipe_config_sanity_check(dev_priv, crtc_state);
        }
}
18509
18510 static void
18511 get_encoder_power_domains(struct drm_i915_private *dev_priv)
18512 {
18513         struct intel_encoder *encoder;
18514
18515         for_each_intel_encoder(&dev_priv->drm, encoder) {
18516                 struct intel_crtc_state *crtc_state;
18517
18518                 if (!encoder->get_power_domains)
18519                         continue;
18520
18521                 /*
18522                  * MST-primary and inactive encoders don't have a crtc state
18523                  * and neither of these require any power domain references.
18524                  */
18525                 if (!encoder->base.crtc)
18526                         continue;
18527
18528                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
18529                 encoder->get_power_domains(encoder, crtc_state);
18530         }
18531 }
18532
/*
 * Apply display workarounds that must be in place early — note the
 * HSW one must be set up before any planes are disabled.
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
        /*
         * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
         * Also known as Wa_14010480278.
         */
        if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
                intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
                               intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);

        if (IS_HASWELL(dev_priv)) {
                /*
                 * WaRsPkgCStateDisplayPMReq:hsw
                 * System hang if this isn't done before disabling all planes!
                 */
                intel_de_write(dev_priv, CHICKEN_PAR1_1,
                               intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
        }
}
18552
18553 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
18554                                        enum port port, i915_reg_t hdmi_reg)
18555 {
18556         u32 val = intel_de_read(dev_priv, hdmi_reg);
18557
18558         if (val & SDVO_ENABLE ||
18559             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
18560                 return;
18561
18562         drm_dbg_kms(&dev_priv->drm,
18563                     "Sanitizing transcoder select for HDMI %c\n",
18564                     port_name(port));
18565
18566         val &= ~SDVO_PIPE_SEL_MASK;
18567         val |= SDVO_PIPE_SEL(PIPE_A);
18568
18569         intel_de_write(dev_priv, hdmi_reg, val);
18570 }
18571
18572 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
18573                                      enum port port, i915_reg_t dp_reg)
18574 {
18575         u32 val = intel_de_read(dev_priv, dp_reg);
18576
18577         if (val & DP_PORT_EN ||
18578             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
18579                 return;
18580
18581         drm_dbg_kms(&dev_priv->drm,
18582                     "Sanitizing transcoder select for DP %c\n",
18583                     port_name(port));
18584
18585         val &= ~DP_PIPE_SEL_MASK;
18586         val |= DP_PIPE_SEL(PIPE_A);
18587
18588         intel_de_write(dev_priv, dp_reg, val);
18589 }
18590
/* Sanitize the transcoder select on all disabled IBX PCH DP/HDMI ports. */
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see. intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplex with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}
18613
/*
 * Read out the current hardware modeset state (whatever the BIOS/GOP or a
 * previous driver instance left programmed) and sanitize it so software
 * state matches the hardware. The steps below are strictly ordered; see
 * the individual comments for the dependencies.
 *
 * Holds an INIT power domain wakeref for the whole sequence so register
 * access stays safe throughout.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	/* Early workarounds must land before any other display access. */
	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(encoder));
	}

	get_encoder_power_domains(dev_priv);

	/* Fix up stale PCH transcoder selects left behind by the BIOS. */
	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

	/*
	 * Read out watermark state; the GMCH platforms (g4x, vlv/chv)
	 * additionally get their watermarks sanitized here.
	 */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		/*
		 * No CRTC should still require extra power domain
		 * references at this point; warn and drop any it does.
		 */
		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
18703
/*
 * Restore the atomic state stashed in dev_priv->modeset_restore_state
 * (if any), taking all modeset locks with deadlock backoff. On failure
 * the error is logged; the stashed state reference is dropped either way.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	/* Consume the stashed state so it can't be restored twice. */
	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* Take all modeset locks, backing off and retrying on deadlock. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(&dev_priv->drm,
			"Restoring old state failed with %i\n", ret);
	/* Drop our reference to the stashed state. */
	if (state)
		drm_atomic_state_put(state);
}
18738
/*
 * Cancel outstanding per-connector work queued by hotplug handling:
 * modeset retry work and, on connectors with HDCP support, the HDCP
 * check/property work.
 */
static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Only cancel work that was actually initialized (func set). */
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
18756
/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	/* Finish pending flip/modeset work while irqs are still available. */
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	/* All queued atomic-state frees should have drained by now. */
	flush_work(&i915->atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}
18766
/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	/* Workqueues were flushed in part #1; safe to destroy them now. */
	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}
18806
18807 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
18808
/* Snapshot of key display registers, captured at GPU error time. */
struct intel_display_error_state {

	u32 power_well_driver;	/* HSW_PWR_WELL_CTL2; only read on hsw/bdw */

	/* Per-pipe cursor registers. */
	struct intel_cursor_error_state {
		u32 control;	/* CURCNTR */
		u32 position;	/* CURPOS */
		u32 base;	/* CURBASE */
		/* NOTE(review): size is never filled in by the capture code
		 * in this file, nor printed — looks vestigial; confirm. */
		u32 size;
	} cursor[I915_MAX_PIPES];

	/* Per-pipe state; registers valid only if power_domain_on. */
	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;	/* PIPESRC */
		u32 stat;	/* PIPESTAT; only read on GMCH platforms */
	} pipe[I915_MAX_PIPES];

	/* Per-pipe primary plane registers (subset depends on gen). */
	struct intel_plane_error_state {
		u32 control;	/* DSPCNTR */
		u32 stride;	/* DSPSTRIDE */
		u32 size;	/* DSPSIZE, gen <= 3 */
		u32 pos;	/* DSPPOS, gen <= 3 */
		u32 addr;	/* DSPADDR, gen <= 7 && !hsw */
		u32 surface;	/* DSPSURF, gen >= 4 */
		u32 tile_offset;	/* DSPTILEOFF, gen >= 4 */
	} plane[I915_MAX_PIPES];

	/*
	 * Per-transcoder timing registers; array size must match the
	 * transcoders[] list in intel_display_capture_error_state()
	 * (enforced there by BUILD_BUG_ON).
	 */
	struct intel_transcoder_error_state {
		bool available;		/* transcoder exists on this platform */
		bool power_domain_on;	/* registers below valid only if set */
		enum transcoder cpu_transcoder;

		u32 conf;	/* PIPECONF */

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];
};
18851
/*
 * Capture a snapshot of display registers (power well, pipes, primary
 * planes, cursors, transcoder timings) for the GPU error state.
 *
 * Allocates with GFP_ATOMIC since this runs in error-capture context.
 * Returns NULL if display is absent/disabled or allocation fails;
 * otherwise the caller owns the returned buffer.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_D,
		TRANSCODER_EDP,
	};
	int i;

	/* Keep the transcoder list and the error-state array in sync. */
	BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));

	if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = intel_de_read(dev_priv,
							 HSW_PWR_WELL_CTL2);

	for_each_pipe(dev_priv, i) {
		/* Skip register reads for pipes whose power domain is down. */
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
		error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
		error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));

		error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
		error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
		/* Which plane registers exist depends on the hardware gen. */
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = intel_de_read(dev_priv,
							     DSPSIZE(i));
			error->plane[i].pos = intel_de_read(dev_priv,
							    DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = intel_de_read(dev_priv,
							     DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = intel_de_read(dev_priv,
								DSPSURF(i));
			error->plane[i].tile_offset = intel_de_read(dev_priv,
								    DSPTILEOFF(i));
		}

		error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));

		if (HAS_GMCH(dev_priv))
			error->pipe[i].stat = intel_de_read(dev_priv,
							    PIPESTAT(i));
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
			continue;

		error->transcoder[i].available = true;
		/* Skip register reads if the transcoder's domain is down. */
		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = intel_de_read(dev_priv,
							  PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = intel_de_read(dev_priv,
							    HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = intel_de_read(dev_priv,
							    HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = intel_de_read(dev_priv,
							   HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = intel_de_read(dev_priv,
							    VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = intel_de_read(dev_priv,
							    VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = intel_de_read(dev_priv,
							   VSYNC(cpu_transcoder));
	}

	return error;
}
18947
18948 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
18949
18950 void
18951 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
18952                                 struct intel_display_error_state *error)
18953 {
18954         struct drm_i915_private *dev_priv = m->i915;
18955         int i;
18956
18957         if (!error)
18958                 return;
18959
18960         err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
18961         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
18962                 err_printf(m, "PWR_WELL_CTL2: %08x\n",
18963                            error->power_well_driver);
18964         for_each_pipe(dev_priv, i) {
18965                 err_printf(m, "Pipe [%d]:\n", i);
18966                 err_printf(m, "  Power: %s\n",
18967                            onoff(error->pipe[i].power_domain_on));
18968                 err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
18969                 err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
18970
18971                 err_printf(m, "Plane [%d]:\n", i);
18972                 err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
18973                 err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
18974                 if (INTEL_GEN(dev_priv) <= 3) {
18975                         err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
18976                         err_printf(m, "  POS: %08x\n", error->plane[i].pos);
18977                 }
18978                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
18979                         err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
18980                 if (INTEL_GEN(dev_priv) >= 4) {
18981                         err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
18982                         err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
18983                 }
18984
18985                 err_printf(m, "Cursor [%d]:\n", i);
18986                 err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
18987                 err_printf(m, "  POS: %08x\n", error->cursor[i].position);
18988                 err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
18989         }
18990
18991         for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
18992                 if (!error->transcoder[i].available)
18993                         continue;
18994
18995                 err_printf(m, "CPU transcoder: %s\n",
18996                            transcoder_name(error->transcoder[i].cpu_transcoder));
18997                 err_printf(m, "  Power: %s\n",
18998                            onoff(error->transcoder[i].power_domain_on));
18999                 err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
19000                 err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
19001                 err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
19002                 err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
19003                 err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
19004                 err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
19005                 err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
19006         }
19007 }
19008
19009 #endif