/*
 * drm/i915/icl: Add support to read out the TBT PLL HW state
 * drivers/gpu/drm/i915/display/intel_display.c
 */
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/i2c.h>
28 #include <linux/input.h>
29 #include <linux/intel-iommu.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/reservation.h>
33 #include <linux/slab.h>
34 #include <linux/vgaarb.h>
35
36 #include <drm/drm_atomic.h>
37 #include <drm/drm_atomic_helper.h>
38 #include <drm/drm_atomic_uapi.h>
39 #include <drm/drm_dp_helper.h>
40 #include <drm/drm_edid.h>
41 #include <drm/drm_fourcc.h>
42 #include <drm/drm_plane_helper.h>
43 #include <drm/drm_probe_helper.h>
44 #include <drm/drm_rect.h>
45 #include <drm/i915_drm.h>
46
47 #include "display/intel_crt.h"
48 #include "display/intel_ddi.h"
49 #include "display/intel_dp.h"
50 #include "display/intel_dsi.h"
51 #include "display/intel_dvo.h"
52 #include "display/intel_gmbus.h"
53 #include "display/intel_hdmi.h"
54 #include "display/intel_lvds.h"
55 #include "display/intel_sdvo.h"
56 #include "display/intel_tv.h"
57 #include "display/intel_vdsc.h"
58
59 #include "i915_drv.h"
60 #include "i915_trace.h"
61 #include "intel_acpi.h"
62 #include "intel_atomic.h"
63 #include "intel_atomic_plane.h"
64 #include "intel_bw.h"
65 #include "intel_color.h"
66 #include "intel_cdclk.h"
67 #include "intel_drv.h"
68 #include "intel_fbc.h"
69 #include "intel_fbdev.h"
70 #include "intel_fifo_underrun.h"
71 #include "intel_frontbuffer.h"
72 #include "intel_hdcp.h"
73 #include "intel_hotplug.h"
74 #include "intel_overlay.h"
75 #include "intel_pipe_crc.h"
76 #include "intel_pm.h"
77 #include "intel_psr.h"
78 #include "intel_quirks.h"
79 #include "intel_sideband.h"
80 #include "intel_sprite.h"
81
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB1555,
        DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const u32 i965_primary_formats[] = {
        DRM_FORMAT_C8,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_XRGB2101010,
        DRM_FORMAT_XBGR2101010,
};

/*
 * Tiling modifiers accepted by the i9xx primary planes; the list is
 * terminated by DRM_FORMAT_MOD_INVALID as required by the DRM core.
 */
static const u64 i9xx_format_modifiers[] = {
        I915_FORMAT_MOD_X_TILED,
        DRM_FORMAT_MOD_LINEAR,
        DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const u32 intel_cursor_formats[] = {
        DRM_FORMAT_ARGB8888,
};

/* Cursor planes take linear buffers only. */
static const u64 cursor_format_modifiers[] = {
        DRM_FORMAT_MOD_LINEAR,
        DRM_FORMAT_MOD_INVALID
};
115
116 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
117                                 struct intel_crtc_state *pipe_config);
118 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
119                                    struct intel_crtc_state *pipe_config);
120
121 static int intel_framebuffer_init(struct intel_framebuffer *ifb,
122                                   struct drm_i915_gem_object *obj,
123                                   struct drm_mode_fb_cmd2 *mode_cmd);
124 static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
125 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
126 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
127                                          const struct intel_link_m_n *m_n,
128                                          const struct intel_link_m_n *m2_n2);
129 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
130 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
131 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
132 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
133 static void vlv_prepare_pll(struct intel_crtc *crtc,
134                             const struct intel_crtc_state *pipe_config);
135 static void chv_prepare_pll(struct intel_crtc *crtc,
136                             const struct intel_crtc_state *pipe_config);
137 static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
138 static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
139 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
140                                     struct intel_crtc_state *crtc_state);
141 static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
142 static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
143 static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
144 static void intel_modeset_setup_hw_state(struct drm_device *dev,
145                                          struct drm_modeset_acquire_ctx *ctx);
146 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
147
/*
 * Valid divisor ranges for a DPLL.  Each pair gives the inclusive
 * [min, max] window for one term of the clock equation; the p2 member
 * additionally carries the rule for choosing between its two values.
 */
struct intel_limit {
        struct {
                int min, max;
        } dot, vco, n, m, m1, m2, p, p1;

        struct {
                /* dot clock threshold (kHz) below which p2_slow is used */
                int dot_limit;
                int p2_slow, p2_fast;
        } p2;
};
158
159 /* returns HPLL frequency in kHz */
160 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
161 {
162         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
163
164         /* Obtain SKU information */
165         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
166                 CCK_FUSE_HPLL_FREQ_MASK;
167
168         return vco_freq[hpll_freq] * 1000;
169 }
170
171 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
172                       const char *name, u32 reg, int ref_freq)
173 {
174         u32 val;
175         int divider;
176
177         val = vlv_cck_read(dev_priv, reg);
178         divider = val & CCK_FREQUENCY_VALUES;
179
180         WARN((val & CCK_FREQUENCY_STATUS) !=
181              (divider << CCK_FREQUENCY_STATUS_SHIFT),
182              "%s change in progress\n", name);
183
184         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
185 }
186
187 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
188                            const char *name, u32 reg)
189 {
190         int hpll;
191
192         vlv_cck_get(dev_priv);
193
194         if (dev_priv->hpll_freq == 0)
195                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
196
197         hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
198
199         vlv_cck_put(dev_priv);
200
201         return hpll;
202 }
203
204 static void intel_update_czclk(struct drm_i915_private *dev_priv)
205 {
206         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
207                 return;
208
209         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
210                                                       CCK_CZ_CLOCK_CONTROL);
211
212         DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
213 }
214
215 static inline u32 /* units of 100MHz */
216 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
217                     const struct intel_crtc_state *pipe_config)
218 {
219         if (HAS_DDI(dev_priv))
220                 return pipe_config->port_clock; /* SPLL */
221         else
222                 return dev_priv->fdi_pll_freq;
223 }
224
/*
 * Per-platform/per-output DPLL divisor limit tables.  The values come
 * from the respective hardware programming specs; how each field enters
 * the clock equation is described above the *_calc_dpll_params()
 * helpers further down.
 */
static const struct intel_limit intel_limits_i8xx_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 2, .max = 33 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
        .m = { .min = 96, .max = 140 },
        .m1 = { .min = 18, .max = 26 },
        .m2 = { .min = 6, .max = 16 },
        .p = { .min = 4, .max = 128 },
        .p1 = { .min = 1, .max = 6 },
        .p2 = { .dot_limit = 165000,
                .p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 8, .max = 18 },
        .m2 = { .min = 3, .max = 7 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
        .m = { .min = 70, .max = 120 },
        .m1 = { .min = 8, .max = 18 },
        .m2 = { .min = 3, .max = 7 },
        .p = { .min = 7, .max = 98 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 7 },
};


static const struct intel_limit intel_limits_g4x_sdvo = {
        .dot = { .min = 25000, .max = 270000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 10, .max = 30 },
        .p1 = { .min = 1, .max = 3},
        .p2 = { .dot_limit = 270000,
                .p2_slow = 10,
                .p2_fast = 10
        },
};

static const struct intel_limit intel_limits_g4x_hdmi = {
        .dot = { .min = 22000, .max = 400000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 16, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8},
        .p2 = { .dot_limit = 165000,
                .p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
        .dot = { .min = 20000, .max = 115000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        /* dot_limit 0: the same p2 is used for the full dot clock range */
        .p2 = { .dot_limit = 0,
                .p2_slow = 14, .p2_fast = 14
        },
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
        .dot = { .min = 80000, .max = 224000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 104, .max = 138 },
        .m1 = { .min = 17, .max = 23 },
        .m2 = { .min = 5, .max = 11 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 0,
                .p2_slow = 7, .p2_fast = 7
        },
};

static const struct intel_limit intel_limits_pineview_sdvo = {
        .dot = { .min = 20000, .max = 400000},
        .vco = { .min = 1700000, .max = 3500000 },
        /* Pineview's Ncounter is a ring counter */
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        /* Pineview only has one combined m divider, which we treat as m2. */
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 200000,
                .p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1700000, .max = 3500000 },
        .n = { .min = 3, .max = 6 },
        .m = { .min = 2, .max = 256 },
        .m1 = { .min = 0, .max = 0 },
        .m2 = { .min = 0, .max = 254 },
        .p = { .min = 7, .max = 112 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 112000,
                .p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 5 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 5, .max = 80 },
        .p1 = { .min = 1, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 118 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 127 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 56 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 2 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 28, .max = 112 },
        .p1 = { .min = 2, .max = 8 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
        .m = { .min = 79, .max = 126 },
        .m1 = { .min = 12, .max = 22 },
        .m2 = { .min = 5, .max = 9 },
        .p = { .min = 14, .max = 42 },
        .p1 = { .min = 2, .max = 6 },
        .p2 = { .dot_limit = 225000,
                .p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
         /*
          * These are the data rate limits (measured in fast clocks)
          * since those are the strictest limits we have. The fast
          * clock and actual rate limits are more relaxed, so checking
          * them would make no difference.
          */
        .dot = { .min = 25000 * 5, .max = 270000 * 5 },
        .vco = { .min = 4000000, .max = 6000000 },
        .n = { .min = 1, .max = 7 },
        .m1 = { .min = 2, .max = 3 },
        .m2 = { .min = 11, .max = 156 },
        .p1 = { .min = 2, .max = 3 },
        .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
        /*
         * These are the data rate limits (measured in fast clocks)
         * since those are the strictest limits we have.  The fast
         * clock and actual rate limits are more relaxed, so checking
         * them would make no difference.
         */
        .dot = { .min = 25000 * 5, .max = 540000 * 5},
        .vco = { .min = 4800000, .max = 6480000 },
        .n = { .min = 1, .max = 1 },
        .m1 = { .min = 2, .max = 2 },
        /* m2 is stored with 22 fractional bits on CHV */
        .m2 = { .min = 24 << 22, .max = 175 << 22 },
        .p1 = { .min = 2, .max = 4 },
        .p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
        /* FIXME: find real dot limits */
        .dot = { .min = 0, .max = INT_MAX },
        .vco = { .min = 4800000, .max = 6700000 },
        .n = { .min = 1, .max = 1 },
        .m1 = { .min = 2, .max = 2 },
        /* FIXME: find real m2 limits */
        .m2 = { .min = 2 << 22, .max = 255 << 22 },
        .p1 = { .min = 2, .max = 4 },
        .p2 = { .p2_slow = 1, .p2_fast = 20 },
};
489
490 /* WA Display #0827: Gen9:all */
491 static void
492 skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
493 {
494         if (enable)
495                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
496                            I915_READ(CLKGATE_DIS_PSL(pipe)) |
497                            DUPS1_GATING_DIS | DUPS2_GATING_DIS);
498         else
499                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
500                            I915_READ(CLKGATE_DIS_PSL(pipe)) &
501                            ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
502 }
503
504 /* Wa_2006604312:icl */
505 static void
506 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
507                        bool enable)
508 {
509         if (enable)
510                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
511                            I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
512         else
513                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
514                            I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
515 }
516
/* Whether this CRTC state requires a full modeset to apply. */
static bool
needs_modeset(const struct intel_crtc_state *state)
{
        return drm_atomic_crtc_needs_modeset(&state->base);
}
522
523 /*
524  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
525  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
526  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
527  * The helpers' return value is the rate of the clock that is fed to the
528  * display engine's pipe which can be the above fast dot clock rate or a
529  * divided-down version of it.
530  */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
        /* Pineview has a single combined m divider, stored as m2 */
        clock->m = clock->m2 + 2;
        clock->p = clock->p1 * clock->p2;
        /* guard the divisions below */
        if (WARN_ON(clock->n == 0 || clock->p == 0))
                return 0;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

        return clock->dot;
}
543
544 static u32 i9xx_dpll_compute_m(struct dpll *dpll)
545 {
546         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
547 }
548
static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
        /* n/m1/m2 register values are biased by 2, hence the +2 terms */
        clock->m = i9xx_dpll_compute_m(clock);
        clock->p = clock->p1 * clock->p2;
        if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
                return 0;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

        return clock->dot;
}
560
static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
        clock->m = clock->m1 * clock->m2;
        clock->p = clock->p1 * clock->p2;
        if (WARN_ON(clock->n == 0 || clock->p == 0))
                return 0;
        clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

        /* clock->dot is the 5x fast clock; the pipe gets 1/5 of it
         * (cf. the "* 5" dot limits in intel_limits_vlv). */
        return clock->dot / 5;
}
572
int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
        clock->m = clock->m1 * clock->m2;
        clock->p = clock->p1 * clock->p2;
        if (WARN_ON(clock->n == 0 || clock->p == 0))
                return 0;
        /* m2 carries 22 fractional bits on CHV (see intel_limits_chv),
         * hence the 64-bit multiply and the n << 22 divisor. */
        clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
                                           clock->n << 22);
        clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

        /* clock->dot is the 5x fast clock; the pipe gets 1/5 of it */
        return clock->dot / 5;
}
585
/*
 * Reject a divisor combination in intel_PLL_is_valid().  NOTE: this
 * macro returns false from the *calling* function; the debug message
 * is normally compiled out.
 */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
587
/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.  Each failing check returns false via the
 * INTELPllInvalid() macro above.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
                               const struct intel_limit *limit,
                               const struct dpll *clock)
{
        if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
                INTELPllInvalid("n out of range\n");
        if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
                INTELPllInvalid("p1 out of range\n");
        if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
                INTELPllInvalid("m2 out of range\n");
        if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
                INTELPllInvalid("m1 out of range\n");

        /* The m1 > m2 restriction does not apply on PNV/VLV/CHV/BXT */
        if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
            !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
                if (clock->m1 <= clock->m2)
                        INTELPllInvalid("m1 <= m2\n");

        /* VLV/CHV/BXT limit tables carry no combined m/p ranges */
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
            !IS_GEN9_LP(dev_priv)) {
                if (clock->p < limit->p.min || limit->p.max < clock->p)
                        INTELPllInvalid("p out of range\n");
                if (clock->m < limit->m.min || limit->m.max < clock->m)
                        INTELPllInvalid("m out of range\n");
        }

        if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
                INTELPllInvalid("vco out of range\n");
        /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
         * connector, etc., rather than just a single range.
         */
        if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
                INTELPllInvalid("dot out of range\n");

        return true;
}
628
629 static int
630 i9xx_select_p2_div(const struct intel_limit *limit,
631                    const struct intel_crtc_state *crtc_state,
632                    int target)
633 {
634         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
635
636         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
637                 /*
638                  * For LVDS just rely on its current settings for dual-channel.
639                  * We haven't figured out how to reliably set up different
640                  * single/dual channel state, if we even can.
641                  */
642                 if (intel_is_dual_link_lvds(dev_priv))
643                         return limit->p2.p2_fast;
644                 else
645                         return limit->p2.p2_slow;
646         } else {
647                 if (target < limit->p2.dot_limit)
648                         return limit->p2.p2_slow;
649                 else
650                         return limit->p2.p2_fast;
651         }
652 }
653
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
                    struct intel_crtc_state *crtc_state,
                    int target, int refclk, struct dpll *match_clock,
                    struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->base.crtc->dev;
        struct dpll clock;
        int err = target; /* smallest error seen; worst case to start */

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        /* Exhaustive scan of the divisor space, keeping the combination
         * whose dot clock lands closest to the target. */
        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
                     clock.m2 <= limit->m2.max; clock.m2++) {
                        /* only m2 < m1 is valid here (cf. the m1 <= m2
                         * check in intel_PLL_is_valid()) */
                        if (clock.m2 >= clock.m1)
                                break;
                        for (clock.n = limit->n.min;
                             clock.n <= limit->n.max; clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                        clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;

                                        i9xx_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;
                                        if (match_clock &&
                                            clock.p != match_clock->p)
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
                                                *best_clock = clock;
                                                err = this_err;
                                        }
                                }
                        }
                }
        }

        /* true iff at least one valid divisor set improved on the
         * initial worst-case error */
        return (err != target);
}
711
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
                   int target, int refclk, struct dpll *match_clock,
                   struct dpll *best_clock)
{
        struct drm_device *dev = crtc_state->base.crtc->dev;
        struct dpll clock;
        int err = target; /* smallest error seen; worst case to start */

        memset(best_clock, 0, sizeof(*best_clock));

        clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

        /* Same exhaustive scan as i9xx_find_best_dpll(), but without
         * the m2 < m1 restriction and using the Pineview clock
         * equation (pnv_calc_dpll_params()). */
        for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
             clock.m1++) {
                for (clock.m2 = limit->m2.min;
                     clock.m2 <= limit->m2.max; clock.m2++) {
                        for (clock.n = limit->n.min;
                             clock.n <= limit->n.max; clock.n++) {
                                for (clock.p1 = limit->p1.min;
                                        clock.p1 <= limit->p1.max; clock.p1++) {
                                        int this_err;

                                        pnv_calc_dpll_params(refclk, &clock);
                                        if (!intel_PLL_is_valid(to_i915(dev),
                                                                limit,
                                                                &clock))
                                                continue;
                                        if (match_clock &&
                                            clock.p != match_clock->p)
                                                continue;

                                        this_err = abs(clock.dot - target);
                                        if (this_err < err) {
                                                *best_clock = clock;
                                                err = this_err;
                                        }
                                }
                        }
                }
        }

        return (err != target);
}
767
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 *
 * NOTE(review): unlike the i9xx/pnv variants, this function never actually
 * consults @match_clock — confirm whether that is intentional.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* shrink the n search space: never revisit larger n */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
826
827 /*
828  * Check if the calculated PLL configuration is more optimal compared to the
829  * best configuration and error found so far. Return the calculated error.
830  */
831 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
832                                const struct dpll *calculated_clock,
833                                const struct dpll *best_clock,
834                                unsigned int best_error_ppm,
835                                unsigned int *error_ppm)
836 {
837         /*
838          * For CHV ignore the error and consider only the P value.
839          * Prefer a bigger P value based on HW requirements.
840          */
841         if (IS_CHERRYVIEW(to_i915(dev))) {
842                 *error_ppm = 0;
843
844                 return calculated_clock->p > best_clock->p;
845         }
846
847         if (WARN_ON_ONCE(!target_freq))
848                 return false;
849
850         *error_ppm = div_u64(1000000ULL *
851                                 abs(target_freq - calculated_clock->dot),
852                              target_freq);
853         /*
854          * Prefer a better P value over a better (smaller) error if the error
855          * is small. Ensure this preference for future configurations too by
856          * setting the error to 0.
857          */
858         if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
859                 *error_ppm = 0;
860
861                 return true;
862         }
863
864         return *error_ppm + 10 < best_error_ppm;
865 }
866
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * NOTE(review): @match_clock is not used by this function.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* solve m2 from the clock equation for this divisor set */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
926
927 /*
928  * Returns a set of divisors for the desired target clock with the given
929  * refclk, or FALSE.  The returned values represent the clock equation:
930  * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
931  */
932 static bool
933 chv_find_best_dpll(const struct intel_limit *limit,
934                    struct intel_crtc_state *crtc_state,
935                    int target, int refclk, struct dpll *match_clock,
936                    struct dpll *best_clock)
937 {
938         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
939         struct drm_device *dev = crtc->base.dev;
940         unsigned int best_error_ppm;
941         struct dpll clock;
942         u64 m2;
943         int found = false;
944
945         memset(best_clock, 0, sizeof(*best_clock));
946         best_error_ppm = 1000000;
947
948         /*
949          * Based on hardware doc, the n always set to 1, and m1 always
950          * set to 2.  If requires to support 200Mhz refclk, we need to
951          * revisit this because n may not 1 anymore.
952          */
953         clock.n = 1, clock.m1 = 2;
954         target *= 5;    /* fast clock */
955
956         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
957                 for (clock.p2 = limit->p2.p2_fast;
958                                 clock.p2 >= limit->p2.p2_slow;
959                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
960                         unsigned int error_ppm;
961
962                         clock.p = clock.p1 * clock.p2;
963
964                         m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
965                                                    refclk * clock.m1);
966
967                         if (m2 > INT_MAX/clock.m1)
968                                 continue;
969
970                         clock.m2 = m2;
971
972                         chv_calc_dpll_params(refclk, &clock);
973
974                         if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
975                                 continue;
976
977                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
978                                                 best_error_ppm, &error_ppm))
979                                 continue;
980
981                         *best_clock = clock;
982                         best_error_ppm = error_ppm;
983                         found = true;
984                 }
985         }
986
987         return found;
988 }
989
990 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
991                         struct dpll *best_clock)
992 {
993         int refclk = 100000;
994         const struct intel_limit *limit = &intel_limits_bxt;
995
996         return chv_find_best_dpll(limit, crtc_state,
997                                   crtc_state->port_clock, refclk,
998                                   NULL, best_clock);
999 }
1000
1001 bool intel_crtc_active(struct intel_crtc *crtc)
1002 {
1003         /* Be paranoid as we can arrive here with only partial
1004          * state retrieved from the hardware during setup.
1005          *
1006          * We can ditch the adjusted_mode.crtc_clock check as soon
1007          * as Haswell has gained clock readout/fastboot support.
1008          *
1009          * We can ditch the crtc->primary->state->fb check as soon as we can
1010          * properly reconstruct framebuffers.
1011          *
1012          * FIXME: The intel_crtc->active here should be switched to
1013          * crtc->state->active once we have proper CRTC states wired up
1014          * for atomic.
1015          */
1016         return crtc->active && crtc->base.primary->state->fb &&
1017                 crtc->config->base.adjusted_mode.crtc_clock;
1018 }
1019
1020 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1021                                              enum pipe pipe)
1022 {
1023         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1024
1025         return crtc->config->cpu_transcoder;
1026 }
1027
/*
 * Sample the pipe's display scanline register twice, 5ms apart, and
 * report whether the scanline position changed (i.e. the pipe is
 * actively scanning out).
 */
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	/* gen2 uses a narrower scanline field than gen3+ */
	if (IS_GEN(dev_priv, 2))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 != line2;
}
1046
/*
 * Poll until the pipe's scanline is (state == true) or is no longer
 * (state == false) moving, with a 100ms timeout. Logs an error on
 * timeout instead of propagating it.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}
1057
/* Wait until the pipe's scanline has stopped advancing (pipe is off). */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
1062
/* Wait until the pipe's scanline starts advancing (pipe is scanning out). */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
1067
/*
 * Wait for the pipe to fully turn off after it has been disabled.
 * Gen4+ exposes an explicit pipe-state bit in PIPECONF to poll;
 * older hardware is checked indirectly via the scanline counter.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_wait_for_register(&dev_priv->uncore,
					    reg, I965_PIPECONF_ACTIVE, 0,
					    100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
1087
/* Only for pre-ILK configs */
/*
 * Assert that the DPLL for @pipe is in the expected on/off @state,
 * based on the DPLL_VCO_ENABLE bit. Emits an I915_STATE_WARN on mismatch.
 */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1101
/* XXX: the dsi pll is shared between MIPI DSI ports */
/*
 * Assert that the DSI PLL is in the expected on/off @state, reading
 * its enable bit through the CCK sideband. Warns on mismatch.
 */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	/* sideband read must be bracketed by get/put */
	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1117
/*
 * Assert that the FDI transmitter for @pipe is in the expected on/off
 * @state. On DDI platforms the transcoder function-control enable bit
 * stands in for the (nonexistent) FDI_TX register.
 */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1139
/*
 * Assert that the FDI receiver for @pipe is in the expected on/off
 * @state, based on the FDI_RX_ENABLE bit. Warns on mismatch.
 */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1154
/*
 * Assert that the FDI TX PLL for @pipe is enabled. Skipped on
 * platforms where the check does not apply (see comments below).
 */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN(dev_priv, 5))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1171
/*
 * Assert that the FDI RX PLL for @pipe is in the expected on/off
 * @state, based on the FDI_RX_PLL_ENABLE bit. Warns on mismatch.
 */
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1184
/*
 * Assert that the panel power sequencer registers driving @pipe are
 * unlocked (or that the panel is off), so that PLL/pipe programming
 * protected by the panel is actually writable. The PPS instance and
 * the pipe the panel is attached to are looked up per platform.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	/* DDI platforms are not expected to reach this path */
	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* map the PPS port select to the pipe the panel port drives */
		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		/* pre-PCH-split hardware: only LVDS panels are expected */
		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* "unlocked" means panel off, or the unlock magic is in place */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1241
/*
 * Assert that @pipe is in the expected enabled/disabled @state, based
 * on PIPECONF_ENABLE. The read is done under a power-domain reference;
 * if the transcoder's power well is off the pipe is treated as disabled.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		/* power well off: the pipe cannot be running */
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
1270
/*
 * Assert that @plane is in the expected enabled/disabled @state,
 * as reported by the plane's own hardware-state readout hook.
 */
static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)
1285
/* Assert that every plane attached to @crtc is disabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
1294
/*
 * Assert that vblank interrupts are disabled on @crtc. A successful
 * (== 0) drm_crtc_vblank_get() means vblanks are available/enabled,
 * which is the state error; drop the reference we just took so the
 * refcount stays balanced.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1300
/*
 * Assert that the PCH transcoder for @pipe is disabled, based on the
 * TRANS_ENABLE bit in PCH_TRANSCONF. Warns if it is still active.
 */
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}
1313
/*
 * Assert that the PCH DP port behind @dp_reg is not enabled on @pipe,
 * and additionally (IBX erratum) that a disabled port is not left
 * parked on transcoder B.
 */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}
1331
/*
 * Assert that the PCH HDMI/SDVO port behind @hdmi_reg is not enabled
 * on @pipe, and additionally (IBX erratum) that a disabled port is not
 * left parked on transcoder B.
 */
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}
1349
/*
 * Assert that no PCH output port (DP, VGA, LVDS, HDMI/SDVO) is still
 * enabled on the PCH transcoder for @pipe.
 */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1374
/*
 * Write the precomputed DPLL value for VLV and wait for the PLL to
 * report lock. Logs an error if the PLL fails to lock within 1ms.
 */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	/* let the clocks settle before polling for lock */
	udelay(150);

	if (intel_wait_for_register(&dev_priv->uncore,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1392
/*
 * Enable the DPLL for a VLV crtc: verify preconditions (pipe off,
 * panel regs unlocked), enable the VCO if the new state wants it,
 * then program DPLL_MD.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* only spin up the VCO when the new state actually enables it */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}
1410
1411
/*
 * Enable the DPLL for a CHV crtc: re-enable the 10-bit clock to the
 * display controller via DPIO, then enable the PLL and wait for lock.
 * Logs an error if the PLL fails to lock within 1ms.
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	vlv_dpio_get(dev_priv);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	vlv_dpio_put(dev_priv);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_wait_for_register(&dev_priv->uncore,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1443
/*
 * Enable the DPLL for a CHV crtc: verify preconditions, enable the
 * VCO if requested, then program DPLL_MD — via the pipe's own register
 * for pipe A, or via the DPLLB chicken-bit workaround for pipes B/C.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		/* remember the value for later readout/verification */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}
1480
1481 static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1482 {
1483         if (IS_I830(dev_priv))
1484                 return false;
1485
1486         return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1487 }
1488
/*
 * Enable the DPLL on gen2-gen4 style hardware, following the quirky
 * write sequence the hardware requires (VGA-mode prewrite, delayed
 * pixel-multiplier update on pre-gen4, triple rewrite for warmup).
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (i9xx_has_pps(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}
1534
/*
 * Disable the DPLL for a gen2-gen4 style crtc, leaving only VGA-mode
 * disable set. No-op on 830, which keeps both pipes (and their PLLs)
 * permanently enabled.
 */
static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
1551
/*
 * Disable the pipe's DPLL on Valleyview, keeping the reference clock
 * (and, on pipes other than A, the integrated CRI clock) running.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/*
	 * Leave integrated CRI clock enabled on non-A pipes even with the
	 * PLL off — presumably required by the PHY; mirrors chv_disable_pll().
	 */
	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}
1567
/*
 * Disable the pipe's DPLL on Cherryview: keep the SSC reference clock
 * (and CRI clock on non-A pipes) running, then turn off the 10-bit
 * DPIO clock feeding the display controller.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	vlv_dpio_get(dev_priv);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	vlv_dpio_put(dev_priv);
}
1593
/*
 * Wait (up to 1 ms) for a VLV/CHV PHY to report the given digital port
 * as ready.
 *
 * Ports B and C report their status in the pipe A DPLL register; port C
 * shares it with port B with its bits shifted up by 4, hence the shift
 * of expected_mask. Port D status lives in DPIO_PHY_STATUS instead.
 * A timeout is only WARNed about, not treated as fatal.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* Port C status bits sit 4 bits above port B's */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_wait_for_register(&dev_priv->uncore,
				    dpll_reg, port_mask, expected_mask,
				    1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->base.port),
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}
1626
/*
 * Enable the PCH transcoder for the given CRTC on ILK-style PCHs.
 *
 * The PCH DPLL and both FDI directions must already be up. The
 * transcoder's BPC and interlace settings are copied/derived from the
 * CPU pipe's PIPECONF before setting TRANS_ENABLE, then we poll for
 * the enabled status (errors are only logged).
 */
static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	/* Mirror the pipe's interlace mode; IBX SDVO needs the legacy mode */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_wait_for_register(&dev_priv->uncore,
				    reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
1685
/*
 * Enable the (single) LPT PCH transcoder, which is always fed from
 * pipe A's FDI. Interlace mode is derived from the CPU transcoder's
 * PIPECONF; the enable is polled for and a timeout only logged.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_wait_for_register(&dev_priv->uncore,
				    LPT_TRANSCONF,
				    TRANS_STATE_ENABLE,
				    TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
1717
/*
 * Disable the PCH transcoder for the given pipe on ILK-style PCHs.
 * FDI and the PCH ports must already be off. Waits (50 ms) for the
 * transcoder to report disabled, then on CPT clears the timing
 * override workaround bit set at enable time.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(&dev_priv->uncore,
				    reg, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
1749
/*
 * Disable the LPT PCH transcoder, wait (50 ms) for it to report
 * disabled, and clear the pipe A timing override workaround bit
 * that was set when it was enabled.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(&dev_priv->uncore,
				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1768
1769 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1770 {
1771         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1772
1773         if (HAS_PCH_LPT(dev_priv))
1774                 return PIPE_A;
1775         else
1776                 return crtc->pipe;
1777 }
1778
1779 static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1780 {
1781         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
1782
1783         /*
1784          * On i965gm the hardware frame counter reads
1785          * zero when the TV encoder is enabled :(
1786          */
1787         if (IS_I965GM(dev_priv) &&
1788             (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1789                 return 0;
1790
1791         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1792                 return 0xffffffff; /* full 32 bit counter */
1793         else if (INTEL_GEN(dev_priv) >= 3)
1794                 return 0xffffff; /* only 24 bits of frame count */
1795         else
1796                 return 0; /* Gen2 doesn't have a hardware frame counter */
1797 }
1798
1799 static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1800 {
1801         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1802
1803         drm_crtc_set_max_vblank_count(&crtc->base,
1804                                       intel_crtc_max_vblank_count(crtc_state));
1805         drm_crtc_vblank_on(&crtc->base);
1806 }
1807
/*
 * Enable the pipe by setting PIPECONF_ENABLE.
 *
 * First sanity-checks that all planes are disabled and that the pipe's
 * clock source (pipe/DSI PLL on gmch, or the FDI PLLs for PCH encoders)
 * is running. On I830 the pipe may already be enabled, which is
 * tolerated. If no usable HW frame counter exists, waits for the
 * scanline to start moving before vblank timestamps are trusted.
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	trace_intel_pipe_enable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		WARN_ON(!IS_I830(dev_priv));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1865
/*
 * Disable the pipe by clearing PIPECONF_ENABLE and wait for it to
 * actually shut off — except on I830, where the pipes are kept enabled
 * permanently and only the double-wide bit may be cleared.
 */
static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	trace_intel_pipe_disable(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	/* Nothing to do if the pipe is already off */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	/* Only wait for off when we actually cleared the enable bit */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1905
/* Tile size in bytes: 2 KiB on gen2, 4 KiB on everything newer. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN(dev_priv, 2))
		return 2048;

	return 4096;
}
1910
/*
 * Return the tile row width in bytes for the given fb color plane.
 *
 * For linear buffers this returns the full tile size so that the
 * derived tile height (tile_size / width_bytes) comes out as one row.
 * CCS AUX planes (color_plane == 1) always use 128-byte wide tiles.
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		if (color_plane == 1)
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (color_plane == 1)
			return 128;
		/* fall through */
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width depends on the pixel size */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}
1958
1959 static unsigned int
1960 intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
1961 {
1962         return intel_tile_size(to_i915(fb->dev)) /
1963                 intel_tile_width_bytes(fb, color_plane);
1964 }
1965
1966 /* Return the tile dimensions in pixel units */
1967 static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
1968                             unsigned int *tile_width,
1969                             unsigned int *tile_height)
1970 {
1971         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1972         unsigned int cpp = fb->format->cpp[color_plane];
1973
1974         *tile_width = tile_width_bytes / cpp;
1975         *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
1976 }
1977
/* Round the fb plane height up to a whole number of tile rows. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, color_plane));
}
1986
1987 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1988 {
1989         unsigned int size = 0;
1990         int i;
1991
1992         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1993                 size += rot_info->plane[i].width * rot_info->plane[i].height;
1994
1995         return size;
1996 }
1997
1998 unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
1999 {
2000         unsigned int size = 0;
2001         int i;
2002
2003         for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
2004                 size += rem_info->plane[i].width * rem_info->plane[i].height;
2005
2006         return size;
2007 }
2008
2009 static void
2010 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2011                         const struct drm_framebuffer *fb,
2012                         unsigned int rotation)
2013 {
2014         view->type = I915_GGTT_VIEW_NORMAL;
2015         if (drm_rotation_90_or_270(rotation)) {
2016                 view->type = I915_GGTT_VIEW_ROTATED;
2017                 view->rotated = to_intel_framebuffer(fb)->rot_info;
2018         }
2019 }
2020
/* Required GGTT alignment for cursor surfaces, per platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
2032
/* Required GGTT alignment for linear scanout surfaces, per platform. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
2045
/*
 * Return the required GGTT alignment for a scanout surface of the
 * given fb color plane, based on the fb's tiling modifier.
 */
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
					 int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if (color_plane == 1)
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_GEN(dev_priv) >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		/* All Y-style tiling wants 1 MiB alignment */
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}
2072
2073 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2074 {
2075         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2076         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2077
2078         return INTEL_GEN(dev_priv) < 4 ||
2079                 (plane->has_fbc &&
2080                  plane_state->view.type == I915_GGTT_VIEW_NORMAL);
2081 }
2082
/*
 * Pin a framebuffer's backing object into the GGTT for scanout and,
 * when requested and possible, attach a fence for tiled scan-out.
 *
 * Returns the pinned vma with an extra reference taken, or an ERR_PTR
 * on failure (in which case no reference is held). On success
 * *out_flags may gain PLANE_HAS_FENCE. Caller must hold struct_mutex
 * and should later release via intel_unpin_fb_vma().
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	alignment = intel_surf_alignment(fb, 0);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	i915_gem_object_lock(obj);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	pinctl = 0;

	/* Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/* Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* Fence is mandatory pre-gen4: unwind and fail */
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* Extra reference handed to the caller; err path skips this */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

	i915_gem_object_unlock(obj);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}
2176
/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence (if one was
 * installed, per PLANE_HAS_FENCE in flags), unpin from the display
 * plane and drop the vma reference. Caller must hold struct_mutex.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	i915_gem_object_lock(vma->obj);
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_gem_object_unlock(vma->obj);

	i915_vma_put(vma);
}
2189
2190 static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
2191                           unsigned int rotation)
2192 {
2193         if (drm_rotation_90_or_270(rotation))
2194                 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
2195         else
2196                 return fb->pitches[color_plane];
2197 }
2198
2199 /*
2200  * Convert the x/y offsets into a linear offset.
2201  * Only valid with 0/180 degree rotation, which is fine since linear
2202  * offset is only used with linear buffers on pre-hsw and tiled buffers
2203  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2204  */
2205 u32 intel_fb_xy_to_linear(int x, int y,
2206                           const struct intel_plane_state *state,
2207                           int color_plane)
2208 {
2209         const struct drm_framebuffer *fb = state->base.fb;
2210         unsigned int cpp = fb->format->cpp[color_plane];
2211         unsigned int pitch = state->color_plane[color_plane].stride;
2212
2213         return y * pitch + x * cpp;
2214 }
2215
2216 /*
2217  * Add the x/y offsets derived from fb->offsets[] to the user
2218  * specified plane src x/y offsets. The resulting x/y offsets
2219  * specify the start of scanout from the beginning of the gtt mapping.
2220  */
2221 void intel_add_fb_offsets(int *x, int *y,
2222                           const struct intel_plane_state *state,
2223                           int color_plane)
2224
2225 {
2226         *x += state->color_plane[color_plane].x;
2227         *y += state->color_plane[color_plane].y;
2228 }
2229
/*
 * Move the difference between two tile-aligned offsets into the x/y
 * offsets. Both offsets must be multiples of tile_size and new_offset
 * must not be above old_offset. Returns new_offset.
 */
static u32 intel_adjust_tile_offset(int *x, int *y,
				    unsigned int tile_width,
				    unsigned int tile_height,
				    unsigned int tile_size,
				    unsigned int pitch_tiles,
				    u32 old_offset,
				    u32 new_offset)
{
	unsigned int pitch_pixels = pitch_tiles * tile_width;
	unsigned int tiles;

	WARN_ON(old_offset & (tile_size - 1));
	WARN_ON(new_offset & (tile_size - 1));
	WARN_ON(new_offset > old_offset);

	/* Convert the byte difference into whole tiles... */
	tiles = (old_offset - new_offset) / tile_size;

	/* ...and distribute them over the x/y offsets */
	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;

	/* minimize x in case it got needlessly big */
	*y += *x / pitch_pixels * tile_height;
	*x %= pitch_pixels;

	return new_offset;
}
2256
2257 static bool is_surface_linear(u64 modifier, int color_plane)
2258 {
2259         return modifier == DRM_FORMAT_MOD_LINEAR;
2260 }
2261
/*
 * Adjust the x/y offsets so that scanning out starting at new_offset
 * hits the same pixels as scanning out from old_offset did. Handles
 * both tiled layouts (via intel_adjust_tile_offset()) and linear
 * layouts (plain pitch/cpp arithmetic). Returns new_offset.
 */
static u32 intel_adjust_aligned_offset(int *x, int *y,
				       const struct drm_framebuffer *fb,
				       int color_plane,
				       unsigned int rotation,
				       unsigned int pitch,
				       u32 old_offset, u32 new_offset)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	WARN_ON(new_offset > old_offset);

	if (!is_surface_linear(fb->modifier, color_plane)) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* rotated view: pitch is in tile rows, swap tile dims */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 old_offset, new_offset);
	} else {
		/* linear: fold the byte delta straight into x/y */
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
2300
2301 /*
2302  * Adjust the tile offset by moving the difference into
2303  * the x/y offsets.
2304  */
2305 static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2306                                              const struct intel_plane_state *state,
2307                                              int color_plane,
2308                                              u32 old_offset, u32 new_offset)
2309 {
2310         return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
2311                                            state->base.rotation,
2312                                            state->color_plane[color_plane].stride,
2313                                            old_offset, new_offset);
2314 }
2315
2316 /*
2317  * Computes the aligned offset to the base tile and adjusts
2318  * x, y. bytes per pixel is assumed to be a power-of-two.
2319  *
2320  * In the 90/270 rotated case, x and y are assumed
2321  * to be already rotated to match the rotated GTT view, and
2322  * pitch is the tile_height aligned framebuffer height.
2323  *
2324  * This function is used when computing the derived information
2325  * under intel_framebuffer, so using any of that information
2326  * here is not allowed. Anything under drm_framebuffer can be
2327  * used. This is why the user has to pass in the pitch since it
2328  * is specified in the rotated orientation.
2329  */
static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
                                        int *x, int *y,
                                        const struct drm_framebuffer *fb,
                                        int color_plane,
                                        unsigned int pitch,
                                        unsigned int rotation,
                                        u32 alignment)
{
        unsigned int cpp = fb->format->cpp[color_plane];
        u32 offset, offset_aligned;

        /* Turn the power-of-two alignment into a bitmask (0 -> no alignment). */
        if (alignment)
                alignment--;

        if (!is_surface_linear(fb->modifier, color_plane)) {
                unsigned int tile_size, tile_width, tile_height;
                unsigned int tile_rows, tiles, pitch_tiles;

                tile_size = intel_tile_size(dev_priv);
                intel_tile_dims(fb, color_plane, &tile_width, &tile_height);

                if (drm_rotation_90_or_270(rotation)) {
                        /* rotated pitch is the tile-aligned fb height (see above) */
                        pitch_tiles = pitch / tile_height;
                        swap(tile_width, tile_height);
                } else {
                        pitch_tiles = pitch / (tile_width * cpp);
                }

                /* Split y into whole-tile rows and the intra-tile remainder. */
                tile_rows = *y / tile_height;
                *y %= tile_height;

                /* Same split for x: whole tiles and intra-tile remainder. */
                tiles = *x / tile_width;
                *x %= tile_width;

                /* Byte offset of the tile containing the original x/y. */
                offset = (tile_rows * pitch_tiles + tiles) * tile_size;
                offset_aligned = offset & ~alignment;

                /* Fold the offset vs. aligned-offset delta back into x/y. */
                intel_adjust_tile_offset(x, y, tile_width, tile_height,
                                         tile_size, pitch_tiles,
                                         offset, offset_aligned);
        } else {
                offset = *y * pitch + *x * cpp;
                offset_aligned = offset & ~alignment;

                /* Unaligned remainder becomes the new x/y offsets. */
                *y = (offset & alignment) / pitch;
                *x = ((offset & alignment) - *y * pitch) / cpp;
        }

        return offset_aligned;
}
2380
2381 static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2382                                               const struct intel_plane_state *state,
2383                                               int color_plane)
2384 {
2385         struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2386         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2387         const struct drm_framebuffer *fb = state->base.fb;
2388         unsigned int rotation = state->base.rotation;
2389         int pitch = state->color_plane[color_plane].stride;
2390         u32 alignment;
2391
2392         if (intel_plane->id == PLANE_CURSOR)
2393                 alignment = intel_cursor_alignment(dev_priv);
2394         else
2395                 alignment = intel_surf_alignment(fb, color_plane);
2396
2397         return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
2398                                             pitch, rotation, alignment);
2399 }
2400
2401 /* Convert the fb->offset[] into x/y offsets */
2402 static int intel_fb_offset_to_xy(int *x, int *y,
2403                                  const struct drm_framebuffer *fb,
2404                                  int color_plane)
2405 {
2406         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2407         unsigned int height;
2408
2409         if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
2410             fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
2411                 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2412                               fb->offsets[color_plane], color_plane);
2413                 return -EINVAL;
2414         }
2415
2416         height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2417         height = ALIGN(height, intel_tile_height(fb, color_plane));
2418
2419         /* Catch potential overflows early */
2420         if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2421                             fb->offsets[color_plane])) {
2422                 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2423                               fb->offsets[color_plane], fb->pitches[color_plane],
2424                               color_plane);
2425                 return -ERANGE;
2426         }
2427
2428         *x = 0;
2429         *y = 0;
2430
2431         intel_adjust_aligned_offset(x, y,
2432                                     fb, color_plane, DRM_MODE_ROTATE_0,
2433                                     fb->pitches[color_plane],
2434                                     fb->offsets[color_plane], 0);
2435
2436         return 0;
2437 }
2438
2439 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
2440 {
2441         switch (fb_modifier) {
2442         case I915_FORMAT_MOD_X_TILED:
2443                 return I915_TILING_X;
2444         case I915_FORMAT_MOD_Y_TILED:
2445         case I915_FORMAT_MOD_Y_TILED_CCS:
2446                 return I915_TILING_Y;
2447         default:
2448                 return I915_TILING_NONE;
2449         }
2450 }
2451
2452 /*
2453  * From the Sky Lake PRM:
2454  * "The Color Control Surface (CCS) contains the compression status of
2455  *  the cache-line pairs. The compression state of the cache-line pair
2456  *  is specified by 2 bits in the CCS. Each CCS cache-line represents
2457  *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2458  *  cache-line-pairs. CCS is always Y tiled."
2459  *
2460  * Since cache line pairs refers to horizontally adjacent cache lines,
2461  * each cache line in the CCS corresponds to an area of 32x16 cache
2462  * lines on the main surface. Since each pixel is 4 bytes, this gives
2463  * us a ratio of one byte in the CCS for each 8x16 pixels in the
2464  * main surface.
2465  */
/* CCS aux plane: 1 byte per 8x16 main surface pixels (hsub=8, vsub=16). */
static const struct drm_format_info ccs_formats[] = {
        { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
          .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
        { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
          .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
        { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
          .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
        { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
          .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
2476
2477 static const struct drm_format_info *
2478 lookup_format_info(const struct drm_format_info formats[],
2479                    int num_formats, u32 format)
2480 {
2481         int i;
2482
2483         for (i = 0; i < num_formats; i++) {
2484                 if (formats[i].format == format)
2485                         return &formats[i];
2486         }
2487
2488         return NULL;
2489 }
2490
2491 static const struct drm_format_info *
2492 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2493 {
2494         switch (cmd->modifier[0]) {
2495         case I915_FORMAT_MOD_Y_TILED_CCS:
2496         case I915_FORMAT_MOD_Yf_TILED_CCS:
2497                 return lookup_format_info(ccs_formats,
2498                                           ARRAY_SIZE(ccs_formats),
2499                                           cmd->pixel_format);
2500         default:
2501                 return NULL;
2502         }
2503 }
2504
2505 bool is_ccs_modifier(u64 modifier)
2506 {
2507         return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2508                modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2509 }
2510
2511 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2512                               u32 pixel_format, u64 modifier)
2513 {
2514         struct intel_crtc *crtc;
2515         struct intel_plane *plane;
2516
2517         /*
2518          * We assume the primary plane for pipe A has
2519          * the highest stride limits of them all.
2520          */
2521         crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
2522         plane = to_intel_plane(crtc->base.primary);
2523
2524         return plane->max_stride(plane, pixel_format, modifier,
2525                                  DRM_MODE_ROTATE_0);
2526 }
2527
2528 static
2529 u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2530                         u32 pixel_format, u64 modifier)
2531 {
2532         /*
2533          * Arbitrary limit for gen4+ chosen to match the
2534          * render engine max stride.
2535          *
2536          * The new CCS hash mode makes remapping impossible
2537          */
2538         if (!is_ccs_modifier(modifier)) {
2539                 if (INTEL_GEN(dev_priv) >= 7)
2540                         return 256*1024;
2541                 else if (INTEL_GEN(dev_priv) >= 4)
2542                         return 128*1024;
2543         }
2544
2545         return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2546 }
2547
2548 static u32
2549 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
2550 {
2551         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2552
2553         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2554                 u32 max_stride = intel_plane_fb_max_stride(dev_priv,
2555                                                            fb->format->format,
2556                                                            fb->modifier);
2557
2558                 /*
2559                  * To make remapping with linear generally feasible
2560                  * we need the stride to be page aligned.
2561                  */
2562                 if (fb->pitches[color_plane] > max_stride)
2563                         return intel_tile_size(dev_priv);
2564                 else
2565                         return 64;
2566         } else {
2567                 return intel_tile_width_bytes(fb, color_plane);
2568         }
2569 }
2570
2571 bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2572 {
2573         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2574         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2575         const struct drm_framebuffer *fb = plane_state->base.fb;
2576         int i;
2577
2578         /* We don't want to deal with remapping with cursors */
2579         if (plane->id == PLANE_CURSOR)
2580                 return false;
2581
2582         /*
2583          * The display engine limits already match/exceed the
2584          * render engine limits, so not much point in remapping.
2585          * Would also need to deal with the fence POT alignment
2586          * and gen2 2KiB GTT tile size.
2587          */
2588         if (INTEL_GEN(dev_priv) < 4)
2589                 return false;
2590
2591         /*
2592          * The new CCS hash mode isn't compatible with remapping as
2593          * the virtual address of the pages affects the compressed data.
2594          */
2595         if (is_ccs_modifier(fb->modifier))
2596                 return false;
2597
2598         /* Linear needs a page aligned stride for remapping */
2599         if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2600                 unsigned int alignment = intel_tile_size(dev_priv) - 1;
2601
2602                 for (i = 0; i < fb->format->num_planes; i++) {
2603                         if (fb->pitches[i] & alignment)
2604                                 return false;
2605                 }
2606         }
2607
2608         return true;
2609 }
2610
2611 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2612 {
2613         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2614         const struct drm_framebuffer *fb = plane_state->base.fb;
2615         unsigned int rotation = plane_state->base.rotation;
2616         u32 stride, max_stride;
2617
2618         /*
2619          * No remapping for invisible planes since we don't have
2620          * an actual source viewport to remap.
2621          */
2622         if (!plane_state->base.visible)
2623                 return false;
2624
2625         if (!intel_plane_can_remap(plane_state))
2626                 return false;
2627
2628         /*
2629          * FIXME: aux plane limits on gen9+ are
2630          * unclear in Bspec, for now no checking.
2631          */
2632         stride = intel_fb_pitch(fb, 0, rotation);
2633         max_stride = plane->max_stride(plane, fb->format->format,
2634                                        fb->modifier, rotation);
2635
2636         return stride > max_stride;
2637 }
2638
/*
 * Precompute the per-color-plane layout information for @fb:
 * the normal-view x/y offsets, the rotated GTT view geometry
 * (intel_fb->rot_info / rotated[]), and a check that the fb
 * actually fits inside the backing object.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
                   struct drm_framebuffer *fb)
{
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct intel_rotation_info *rot_info = &intel_fb->rot_info;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        u32 gtt_offset_rotated = 0;
        unsigned int max_size = 0;
        int i, num_planes = fb->format->num_planes;
        unsigned int tile_size = intel_tile_size(dev_priv);

        for (i = 0; i < num_planes; i++) {
                unsigned int width, height;
                unsigned int cpp, size;
                u32 offset;
                int x, y;
                int ret;

                cpp = fb->format->cpp[i];
                width = drm_framebuffer_plane_width(fb->width, fb, i);
                height = drm_framebuffer_plane_height(fb->height, fb, i);

                /* Convert fb->offsets[i] into x/y coordinates. */
                ret = intel_fb_offset_to_xy(&x, &y, fb, i);
                if (ret) {
                        DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
                                      i, fb->offsets[i]);
                        return ret;
                }

                if (is_ccs_modifier(fb->modifier) && i == 1) {
                        int hsub = fb->format->hsub;
                        int vsub = fb->format->vsub;
                        int tile_width, tile_height;
                        int main_x, main_y;
                        int ccs_x, ccs_y;

                        intel_tile_dims(fb, i, &tile_width, &tile_height);
                        tile_width *= hsub;
                        tile_height *= vsub;

                        /* Intra-tile positions, in main surface coordinates. */
                        ccs_x = (x * hsub) % tile_width;
                        ccs_y = (y * vsub) % tile_height;
                        main_x = intel_fb->normal[0].x % tile_width;
                        main_y = intel_fb->normal[0].y % tile_height;

                        /*
                         * CCS doesn't have its own x/y offset register, so the intra CCS tile
                         * x/y offsets must match between CCS and the main surface.
                         */
                        if (main_x != ccs_x || main_y != ccs_y) {
                                DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
                                              main_x, main_y,
                                              ccs_x, ccs_y,
                                              intel_fb->normal[0].x,
                                              intel_fb->normal[0].y,
                                              x, y);
                                return -EINVAL;
                        }
                }

                /*
                 * The fence (if used) is aligned to the start of the object
                 * so having the framebuffer wrap around across the edge of the
                 * fenced region doesn't really work. We have no API to configure
                 * the fence start offset within the object (nor could we probably
                 * on gen2/3). So it's just easier if we just require that the
                 * fb layout agrees with the fence layout. We already check that the
                 * fb stride matches the fence stride elsewhere.
                 */
                if (i == 0 && i915_gem_object_is_tiled(obj) &&
                    (x + width) * cpp > fb->pitches[i]) {
                        DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
                                      i, fb->offsets[i]);
                        return -EINVAL;
                }

                /*
                 * First pixel of the framebuffer from
                 * the start of the normal gtt mapping.
                 */
                intel_fb->normal[i].x = x;
                intel_fb->normal[i].y = y;

                /* Tile-aligned base offset; remainder folded into x/y. */
                offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
                                                      fb->pitches[i],
                                                      DRM_MODE_ROTATE_0,
                                                      tile_size);
                /* From here on offset is in tile units, not bytes. */
                offset /= tile_size;

                if (!is_surface_linear(fb->modifier, i)) {
                        unsigned int tile_width, tile_height;
                        unsigned int pitch_tiles;
                        struct drm_rect r;

                        intel_tile_dims(fb, i, &tile_width, &tile_height);

                        rot_info->plane[i].offset = offset;
                        rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
                        rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
                        rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

                        intel_fb->rotated[i].pitch =
                                rot_info->plane[i].height * tile_height;

                        /* how many tiles does this plane need */
                        size = rot_info->plane[i].stride * rot_info->plane[i].height;
                        /*
                         * If the plane isn't horizontally tile aligned,
                         * we need one more tile.
                         */
                        if (x != 0)
                                size++;

                        /* rotate the x/y offsets to match the GTT view */
                        r.x1 = x;
                        r.y1 = y;
                        r.x2 = x + width;
                        r.y2 = y + height;
                        drm_rect_rotate(&r,
                                        rot_info->plane[i].width * tile_width,
                                        rot_info->plane[i].height * tile_height,
                                        DRM_MODE_ROTATE_270);
                        x = r.x1;
                        y = r.y1;

                        /* rotate the tile dimensions to match the GTT view */
                        pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
                        swap(tile_width, tile_height);

                        /*
                         * We only keep the x/y offsets, so push all of the
                         * gtt offset into the x/y offsets.
                         */
                        intel_adjust_tile_offset(&x, &y,
                                                 tile_width, tile_height,
                                                 tile_size, pitch_tiles,
                                                 gtt_offset_rotated * tile_size, 0);

                        gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

                        /*
                         * First pixel of the framebuffer from
                         * the start of the rotated gtt mapping.
                         */
                        intel_fb->rotated[i].x = x;
                        intel_fb->rotated[i].y = y;
                } else {
                        /* Linear plane: size in tiles, rounded up. */
                        size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
                                            x * cpp, tile_size);
                }

                /* how many tiles in total needed in the bo */
                max_size = max(max_size, offset + size);
        }

        /* Reject fbs that don't fit inside the backing object. */
        if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
                DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
                              mul_u32_u32(max_size, tile_size), obj->base.size);
                return -EINVAL;
        }

        return 0;
}
2803
/*
 * Set up a rotated/remapped GGTT view for the plane and recompute
 * the per-color-plane stride and x/y offsets relative to that view.
 * Also translates/rotates plane_state->base.src to match the view.
 * Must not be used with CCS fbs (see the WARN_ON below).
 */
static void
intel_plane_remap_gtt(struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->base.plane->dev);
        struct drm_framebuffer *fb = plane_state->base.fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct intel_rotation_info *info = &plane_state->view.rotated;
        unsigned int rotation = plane_state->base.rotation;
        int i, num_planes = fb->format->num_planes;
        unsigned int tile_size = intel_tile_size(dev_priv);
        unsigned int src_x, src_y;
        unsigned int src_w, src_h;
        u32 gtt_offset = 0;

        /* 90/270 rotation needs the ROTATED view, otherwise REMAPPED. */
        memset(&plane_state->view, 0, sizeof(plane_state->view));
        plane_state->view.type = drm_rotation_90_or_270(rotation) ?
                I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;

        /* src coordinates are 16.16 fixed point. */
        src_x = plane_state->base.src.x1 >> 16;
        src_y = plane_state->base.src.y1 >> 16;
        src_w = drm_rect_width(&plane_state->base.src) >> 16;
        src_h = drm_rect_height(&plane_state->base.src) >> 16;

        WARN_ON(is_ccs_modifier(fb->modifier));

        /* Make src coordinates relative to the viewport */
        drm_rect_translate(&plane_state->base.src,
                           -(src_x << 16), -(src_y << 16));

        /* Rotate src coordinates to match rotated GTT view */
        if (drm_rotation_90_or_270(rotation))
                drm_rect_rotate(&plane_state->base.src,
                                src_w << 16, src_h << 16,
                                DRM_MODE_ROTATE_270);

        for (i = 0; i < num_planes; i++) {
                /* Color plane 0 is never subsampled. */
                unsigned int hsub = i ? fb->format->hsub : 1;
                unsigned int vsub = i ? fb->format->vsub : 1;
                unsigned int cpp = fb->format->cpp[i];
                unsigned int tile_width, tile_height;
                unsigned int width, height;
                unsigned int pitch_tiles;
                unsigned int x, y;
                u32 offset;

                intel_tile_dims(fb, i, &tile_width, &tile_height);

                x = src_x / hsub;
                y = src_y / vsub;
                width = src_w / hsub;
                height = src_h / vsub;

                /*
                 * First pixel of the src viewport from the
                 * start of the normal gtt mapping.
                 */
                x += intel_fb->normal[i].x;
                y += intel_fb->normal[i].y;

                /* Tile-aligned base offset; remainder folded into x/y. */
                offset = intel_compute_aligned_offset(dev_priv, &x, &y,
                                                      fb, i, fb->pitches[i],
                                                      DRM_MODE_ROTATE_0, tile_size);
                /* From here on offset is in tile units, not bytes. */
                offset /= tile_size;

                info->plane[i].offset = offset;
                info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
                                                     tile_width * cpp);
                info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
                info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

                if (drm_rotation_90_or_270(rotation)) {
                        struct drm_rect r;

                        /* rotate the x/y offsets to match the GTT view */
                        r.x1 = x;
                        r.y1 = y;
                        r.x2 = x + width;
                        r.y2 = y + height;
                        drm_rect_rotate(&r,
                                        info->plane[i].width * tile_width,
                                        info->plane[i].height * tile_height,
                                        DRM_MODE_ROTATE_270);
                        x = r.x1;
                        y = r.y1;

                        pitch_tiles = info->plane[i].height;
                        plane_state->color_plane[i].stride = pitch_tiles * tile_height;

                        /* rotate the tile dimensions to match the GTT view */
                        swap(tile_width, tile_height);
                } else {
                        pitch_tiles = info->plane[i].width;
                        plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
                }

                /*
                 * We only keep the x/y offsets, so push all of the
                 * gtt offset into the x/y offsets.
                 */
                intel_adjust_tile_offset(&x, &y,
                                         tile_width, tile_height,
                                         tile_size, pitch_tiles,
                                         gtt_offset * tile_size, 0);

                /* Running total: where the next color plane starts in the view. */
                gtt_offset += info->plane[i].width * info->plane[i].height;

                plane_state->color_plane[i].offset = 0;
                plane_state->color_plane[i].x = x;
                plane_state->color_plane[i].y = y;
        }
}
2916
/*
 * Compute the GGTT view and per-color-plane stride/offset/x/y for
 * the plane, remapping through intel_plane_remap_gtt() when the fb
 * stride exceeds what the plane can scan out directly. Returns 0
 * or a negative error code from the stride check.
 */
static int
intel_plane_compute_gtt(struct intel_plane_state *plane_state)
{
        const struct intel_framebuffer *fb =
                to_intel_framebuffer(plane_state->base.fb);
        unsigned int rotation = plane_state->base.rotation;
        int i, num_planes;

        /* No fb, nothing to compute. */
        if (!fb)
                return 0;

        num_planes = fb->base.format->num_planes;

        if (intel_plane_needs_remap(plane_state)) {
                intel_plane_remap_gtt(plane_state);

                /*
                 * Sometimes even remapping can't overcome
                 * the stride limitations :( Can happen with
                 * big plane sizes and suitably misaligned
                 * offsets.
                 */
                return intel_plane_check_stride(plane_state);
        }

        /* Non-remapped path: use the precomputed fb layout. */
        intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);

        for (i = 0; i < num_planes; i++) {
                plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
                plane_state->color_plane[i].offset = 0;

                if (drm_rotation_90_or_270(rotation)) {
                        plane_state->color_plane[i].x = fb->rotated[i].x;
                        plane_state->color_plane[i].y = fb->rotated[i].y;
                } else {
                        plane_state->color_plane[i].x = fb->normal[i].x;
                        plane_state->color_plane[i].y = fb->normal[i].y;
                }
        }

        /* Rotate src coordinates to match rotated GTT view */
        if (drm_rotation_90_or_270(rotation))
                drm_rect_rotate(&plane_state->base.src,
                                fb->base.width << 16, fb->base.height << 16,
                                DRM_MODE_ROTATE_270);

        return intel_plane_check_stride(plane_state);
}
2965
2966 static int i9xx_format_to_fourcc(int format)
2967 {
2968         switch (format) {
2969         case DISPPLANE_8BPP:
2970                 return DRM_FORMAT_C8;
2971         case DISPPLANE_BGRX555:
2972                 return DRM_FORMAT_XRGB1555;
2973         case DISPPLANE_BGRX565:
2974                 return DRM_FORMAT_RGB565;
2975         default:
2976         case DISPPLANE_BGRX888:
2977                 return DRM_FORMAT_XRGB8888;
2978         case DISPPLANE_RGBX888:
2979                 return DRM_FORMAT_XBGR8888;
2980         case DISPPLANE_BGRX101010:
2981                 return DRM_FORMAT_XRGB2101010;
2982         case DISPPLANE_RGBX101010:
2983                 return DRM_FORMAT_XBGR2101010;
2984         }
2985 }
2986
2987 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2988 {
2989         switch (format) {
2990         case PLANE_CTL_FORMAT_RGB_565:
2991                 return DRM_FORMAT_RGB565;
2992         case PLANE_CTL_FORMAT_NV12:
2993                 return DRM_FORMAT_NV12;
2994         case PLANE_CTL_FORMAT_P010:
2995                 return DRM_FORMAT_P010;
2996         case PLANE_CTL_FORMAT_P012:
2997                 return DRM_FORMAT_P012;
2998         case PLANE_CTL_FORMAT_P016:
2999                 return DRM_FORMAT_P016;
3000         case PLANE_CTL_FORMAT_Y210:
3001                 return DRM_FORMAT_Y210;
3002         case PLANE_CTL_FORMAT_Y212:
3003                 return DRM_FORMAT_Y212;
3004         case PLANE_CTL_FORMAT_Y216:
3005                 return DRM_FORMAT_Y216;
3006         case PLANE_CTL_FORMAT_Y410:
3007                 return DRM_FORMAT_XVYU2101010;
3008         case PLANE_CTL_FORMAT_Y412:
3009                 return DRM_FORMAT_XVYU12_16161616;
3010         case PLANE_CTL_FORMAT_Y416:
3011                 return DRM_FORMAT_XVYU16161616;
3012         default:
3013         case PLANE_CTL_FORMAT_XRGB_8888:
3014                 if (rgb_order) {
3015                         if (alpha)
3016                                 return DRM_FORMAT_ABGR8888;
3017                         else
3018                                 return DRM_FORMAT_XBGR8888;
3019                 } else {
3020                         if (alpha)
3021                                 return DRM_FORMAT_ARGB8888;
3022                         else
3023                                 return DRM_FORMAT_XRGB8888;
3024                 }
3025         case PLANE_CTL_FORMAT_XRGB_2101010:
3026                 if (rgb_order)
3027                         return DRM_FORMAT_XBGR2101010;
3028                 else
3029                         return DRM_FORMAT_XRGB2101010;
3030         case PLANE_CTL_FORMAT_XRGB_16161616F:
3031                 if (rgb_order) {
3032                         if (alpha)
3033                                 return DRM_FORMAT_ABGR16161616F;
3034                         else
3035                                 return DRM_FORMAT_XBGR16161616F;
3036                 } else {
3037                         if (alpha)
3038                                 return DRM_FORMAT_ARGB16161616F;
3039                         else
3040                                 return DRM_FORMAT_XRGB16161616F;
3041                 }
3042         }
3043 }
3044
3045 static bool
3046 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
3047                               struct intel_initial_plane_config *plane_config)
3048 {
3049         struct drm_device *dev = crtc->base.dev;
3050         struct drm_i915_private *dev_priv = to_i915(dev);
3051         struct drm_i915_gem_object *obj = NULL;
3052         struct drm_mode_fb_cmd2 mode_cmd = { 0 };
3053         struct drm_framebuffer *fb = &plane_config->fb->base;
3054         u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
3055         u32 size_aligned = round_up(plane_config->base + plane_config->size,
3056                                     PAGE_SIZE);
3057
3058         size_aligned -= base_aligned;
3059
3060         if (plane_config->size == 0)
3061                 return false;
3062
3063         /* If the FB is too big, just don't use it since fbdev is not very
3064          * important and we should probably use that space with FBC or other
3065          * features. */
3066         if (size_aligned * 2 > dev_priv->stolen_usable_size)
3067                 return false;
3068
3069         switch (fb->modifier) {
3070         case DRM_FORMAT_MOD_LINEAR:
3071         case I915_FORMAT_MOD_X_TILED:
3072         case I915_FORMAT_MOD_Y_TILED:
3073                 break;
3074         default:
3075                 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
3076                                  fb->modifier);
3077                 return false;
3078         }
3079
3080         mutex_lock(&dev->struct_mutex);
3081         obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
3082                                                              base_aligned,
3083                                                              base_aligned,
3084                                                              size_aligned);
3085         mutex_unlock(&dev->struct_mutex);
3086         if (!obj)
3087                 return false;
3088
3089         switch (plane_config->tiling) {
3090         case I915_TILING_NONE:
3091                 break;
3092         case I915_TILING_X:
3093         case I915_TILING_Y:
3094                 obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
3095                 break;
3096         default:
3097                 MISSING_CASE(plane_config->tiling);
3098                 return false;
3099         }
3100
3101         mode_cmd.pixel_format = fb->format->format;
3102         mode_cmd.width = fb->width;
3103         mode_cmd.height = fb->height;
3104         mode_cmd.pitches[0] = fb->pitches[0];
3105         mode_cmd.modifier[0] = fb->modifier;
3106         mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
3107
3108         if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
3109                 DRM_DEBUG_KMS("intel fb init failed\n");
3110                 goto out_unref_obj;
3111         }
3112
3113
3114         DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
3115         return true;
3116
3117 out_unref_obj:
3118         i915_gem_object_put(obj);
3119         return false;
3120 }
3121
3122 static void
3123 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3124                         struct intel_plane_state *plane_state,
3125                         bool visible)
3126 {
3127         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
3128
3129         plane_state->base.visible = visible;
3130
3131         if (visible)
3132                 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
3133         else
3134                 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
3135 }
3136
/* Rebuild crtc_state->active_planes from the crtc's plane_mask. */
static void fixup_active_planes(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct drm_plane *plane;

	/*
	 * Active_planes aliases if multiple "primary" or cursor planes
	 * have been used on the same (or wrong) pipe. plane_mask uses
	 * unique ids, hence we can use that to reconstruct active_planes.
	 */
	crtc_state->active_planes = 0;

	drm_for_each_plane_mask(plane, &dev_priv->drm,
				crtc_state->base.plane_mask)
		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
}
3153
/*
 * Disable a plane outside of the atomic commit machinery, updating the
 * crtc's software state (visibility, active_planes, data rate) to match.
 * Used during initial HW state takeover when a BIOS-enabled plane cannot
 * be kept.
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
					 struct intel_plane *plane)
{
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		      plane->base.base.id, plane->base.name,
		      crtc->base.base.id, crtc->base.name);

	/* Clear visibility first so fixup_active_planes() sees the new mask. */
	intel_set_plane_visible(crtc_state, plane_state, false);
	fixup_active_planes(crtc_state);
	crtc_state->data_rate[plane->id] = 0;

	/* The primary plane needs additional teardown before disabling. */
	if (plane->id == PLANE_PRIMARY)
		intel_pre_disable_primary_noatomic(&crtc->base);

	intel_disable_plane(plane, crtc_state);
}
3175
/*
 * Take over the BIOS-programmed framebuffer for this crtc's primary
 * plane.  First try to wrap the preallocated stolen memory in a fresh
 * GEM object; failing that, look for another active crtc whose primary
 * plane scans out from the same GGTT address and share its fb.  If
 * neither works, disable the primary plane so software state matches
 * what we can actually support.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	/* The reconstructed fb won't be used past this point; free it. */
	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		struct intel_plane_state *state;

		if (c == &intel_crtc->base)
			continue;

		if (!to_intel_crtc(c)->active)
			continue;

		state = to_intel_plane_state(c->primary->state);
		if (!state->vma)
			continue;

		/* Same GGTT address: BIOS pointed both planes at one fb. */
		if (intel_plane_ggtt_offset(state) == plane_config->base) {
			fb = state->base.fb;
			drm_framebuffer_get(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(intel_crtc, intel_plane);

	return;

valid_fb:
	/* Populate the plane state to match what the BIOS programmed. */
	intel_state->base.rotation = plane_config->rotation;
	intel_fill_fb_ggtt_view(&intel_state->view, fb,
				intel_state->base.rotation);
	intel_state->color_plane[0].stride =
		intel_fb_pitch(fb, 0, intel_state->base.rotation);

	mutex_lock(&dev->struct_mutex);
	intel_state->vma =
		intel_pin_and_fence_fb_obj(fb,
					   &intel_state->view,
					   intel_plane_uses_fence(intel_state),
					   &intel_state->flags);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(intel_state->vma)) {
		DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
			  intel_crtc->pipe, PTR_ERR(intel_state->vma));

		intel_state->vma = NULL;
		drm_framebuffer_put(fb);
		return;
	}

	obj = intel_fb_obj(fb);
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

	/* Full-fb src/dst rectangles; src is in 16.16 fixed point. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->base.src = drm_plane_state_src(plane_state);
	intel_state->base.dst = drm_plane_state_dest(plane_state);

	if (i915_gem_object_is_tiled(obj))
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	plane_state->crtc = &intel_crtc->base;

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &obj->frontbuffer_bits);
}
3284
3285 static int skl_max_plane_width(const struct drm_framebuffer *fb,
3286                                int color_plane,
3287                                unsigned int rotation)
3288 {
3289         int cpp = fb->format->cpp[color_plane];
3290
3291         switch (fb->modifier) {
3292         case DRM_FORMAT_MOD_LINEAR:
3293         case I915_FORMAT_MOD_X_TILED:
3294                 return 4096;
3295         case I915_FORMAT_MOD_Y_TILED_CCS:
3296         case I915_FORMAT_MOD_Yf_TILED_CCS:
3297                 /* FIXME AUX plane? */
3298         case I915_FORMAT_MOD_Y_TILED:
3299         case I915_FORMAT_MOD_Yf_TILED:
3300                 if (cpp == 8)
3301                         return 2048;
3302                 else
3303                         return 4096;
3304         default:
3305                 MISSING_CASE(fb->modifier);
3306                 return 2048;
3307         }
3308 }
3309
3310 static int glk_max_plane_width(const struct drm_framebuffer *fb,
3311                                int color_plane,
3312                                unsigned int rotation)
3313 {
3314         int cpp = fb->format->cpp[color_plane];
3315
3316         switch (fb->modifier) {
3317         case DRM_FORMAT_MOD_LINEAR:
3318         case I915_FORMAT_MOD_X_TILED:
3319                 if (cpp == 8)
3320                         return 4096;
3321                 else
3322                         return 5120;
3323         case I915_FORMAT_MOD_Y_TILED_CCS:
3324         case I915_FORMAT_MOD_Yf_TILED_CCS:
3325                 /* FIXME AUX plane? */
3326         case I915_FORMAT_MOD_Y_TILED:
3327         case I915_FORMAT_MOD_Yf_TILED:
3328                 if (cpp == 8)
3329                         return 2048;
3330                 else
3331                         return 5120;
3332         default:
3333                 MISSING_CASE(fb->modifier);
3334                 return 2048;
3335         }
3336 }
3337
/*
 * Gen11+ uses a single plane width limit irrespective of modifier,
 * cpp or rotation; the unused parameters keep the signature parallel
 * to the skl/glk variants so the caller can pick one uniformly.
 */
static int icl_max_plane_width(const struct drm_framebuffer *fb,
			       int color_plane,
			       unsigned int rotation)
{
	return 5120;
}
3344
/*
 * The CCS AUX surface shares the main surface's x/y offsets (it has
 * none of its own), so the AUX offset must be chosen such that the
 * resulting AUX x/y coordinates equal the main surface's.  Walk the
 * AUX offset down one alignment step at a time until they match or
 * we run out of room.  Returns true on success, having updated
 * color_plane[1] with the final offset/x/y.
 */
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
					   int main_x, int main_y, u32 main_offset)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int hsub = fb->format->hsub;
	int vsub = fb->format->vsub;
	int aux_x = plane_state->color_plane[1].x;
	int aux_y = plane_state->color_plane[1].y;
	u32 aux_offset = plane_state->color_plane[1].offset;
	u32 alignment = intel_surf_alignment(fb, 1);

	/*
	 * Keep stepping while the AUX offset stays at or beyond the main
	 * offset and the AUX y doesn't overshoot the main y.
	 */
	while (aux_offset >= main_offset && aux_y <= main_y) {
		int x, y;

		if (aux_x == main_x && aux_y == main_y)
			break;

		if (aux_offset == 0)
			break;

		/* Convert to AUX-surface units via the subsampling factors. */
		x = aux_x / hsub;
		y = aux_y / vsub;
		aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
							       aux_offset, aux_offset - alignment);
		/* Back to main-surface pixels, preserving the sub-sample remainder. */
		aux_x = x * hsub + aux_x % hsub;
		aux_y = y * vsub + aux_y % vsub;
	}

	if (aux_x != main_x || aux_y != main_y)
		return false;

	plane_state->color_plane[1].offset = aux_offset;
	plane_state->color_plane[1].x = aux_x;
	plane_state->color_plane[1].y = aux_y;

	return true;
}
3382
/*
 * Compute the final offset/x/y of the main (Y/RGB) surface, honouring
 * per-generation size limits, surface alignment, the X-tiled stride
 * constraint and (for CCS) the shared main/AUX coordinates.  On
 * success the result is stored in color_plane[0] and the src rect is
 * translated so later coordinate checks see the adjusted values.
 */
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int x = plane_state->base.src.x1 >> 16;
	int y = plane_state->base.src.y1 >> 16;
	int w = drm_rect_width(&plane_state->base.src) >> 16;
	int h = drm_rect_height(&plane_state->base.src) >> 16;
	int max_width;
	int max_height = 4096;
	u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;

	/* Width limit depends on platform generation (and tiling/cpp). */
	if (INTEL_GEN(dev_priv) >= 11)
		max_width = icl_max_plane_width(fb, 0, rotation);
	else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		max_width = glk_max_plane_width(fb, 0, rotation);
	else
		max_width = skl_max_plane_width(fb, 0, rotation);

	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	intel_add_fb_offsets(&x, &y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
	alignment = intel_surf_alignment(fb, 0);

	/*
	 * AUX surface offset is specified as the distance from the
	 * main surface offset, and it must be non-negative. Make
	 * sure that is what we will get.
	 */
	if (offset > aux_offset)
		offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
							   offset, aux_offset & ~(alignment - 1));

	/*
	 * When using an X-tiled surface, the plane blows up
	 * if the x offset + width exceed the stride.
	 *
	 * TODO: linear and Y-tiled seem fine, Yf untested,
	 */
	if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
		int cpp = fb->format->cpp[0];

		/* Walk the offset down until x + w fits within the stride. */
		while ((x + w) * cpp > plane_state->color_plane[0].stride) {
			if (offset == 0) {
				DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
				return -EINVAL;
			}

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}
	}

	/*
	 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
	 * they match with the main surface x/y offsets.
	 */
	if (is_ccs_modifier(fb->modifier)) {
		while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
			if (offset == 0)
				break;

			offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
								   offset, offset - alignment);
		}

		if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
			DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
			return -EINVAL;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate(&plane_state->base.src,
			   (x << 16) - plane_state->base.src.x1,
			   (y << 16) - plane_state->base.src.y1);

	return 0;
}
3475
/*
 * Compute offset/x/y for the CbCr (AUX) surface of planar YUV FBs.
 * Note the >> 17 (rather than >> 16): on top of the 16.16 fixed point
 * conversion this halves the coordinates, matching the 2x2 chroma
 * subsampling of the planar formats handled here.
 */
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	int max_width = skl_max_plane_width(fb, 1, rotation);
	int max_height = 4096;
	int x = plane_state->base.src.x1 >> 17;
	int y = plane_state->base.src.y1 >> 17;
	int w = drm_rect_width(&plane_state->base.src) >> 17;
	int h = drm_rect_height(&plane_state->base.src) >> 17;
	u32 offset;

	intel_add_fb_offsets(&x, &y, plane_state, 1);
	offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);

	/* FIXME not quite sure how/if these apply to the chroma plane */
	if (w > max_width || h > max_height) {
		DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
			      w, h, max_width, max_height);
		return -EINVAL;
	}

	plane_state->color_plane[1].offset = offset;
	plane_state->color_plane[1].x = x;
	plane_state->color_plane[1].y = y;

	return 0;
}
3504
3505 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3506 {
3507         const struct drm_framebuffer *fb = plane_state->base.fb;
3508         int src_x = plane_state->base.src.x1 >> 16;
3509         int src_y = plane_state->base.src.y1 >> 16;
3510         int hsub = fb->format->hsub;
3511         int vsub = fb->format->vsub;
3512         int x = src_x / hsub;
3513         int y = src_y / vsub;
3514         u32 offset;
3515
3516         intel_add_fb_offsets(&x, &y, plane_state, 1);
3517         offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
3518
3519         plane_state->color_plane[1].offset = offset;
3520         plane_state->color_plane[1].x = x * hsub + src_x % hsub;
3521         plane_state->color_plane[1].y = y * vsub + src_y % vsub;
3522
3523         return 0;
3524 }
3525
3526 int skl_check_plane_surface(struct intel_plane_state *plane_state)
3527 {
3528         const struct drm_framebuffer *fb = plane_state->base.fb;
3529         int ret;
3530
3531         ret = intel_plane_compute_gtt(plane_state);
3532         if (ret)
3533                 return ret;
3534
3535         if (!plane_state->base.visible)
3536                 return 0;
3537
3538         /*
3539          * Handle the AUX surface first since
3540          * the main surface setup depends on it.
3541          */
3542         if (is_planar_yuv_format(fb->format->format)) {
3543                 ret = skl_check_nv12_aux_surface(plane_state);
3544                 if (ret)
3545                         return ret;
3546         } else if (is_ccs_modifier(fb->modifier)) {
3547                 ret = skl_check_ccs_aux_surface(plane_state);
3548                 if (ret)
3549                         return ret;
3550         } else {
3551                 plane_state->color_plane[1].offset = ~0xfff;
3552                 plane_state->color_plane[1].x = 0;
3553                 plane_state->color_plane[1].y = 0;
3554         }
3555
3556         ret = skl_check_main_surface(plane_state);
3557         if (ret)
3558                 return ret;
3559
3560         return 0;
3561 }
3562
3563 unsigned int
3564 i9xx_plane_max_stride(struct intel_plane *plane,
3565                       u32 pixel_format, u64 modifier,
3566                       unsigned int rotation)
3567 {
3568         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3569
3570         if (!HAS_GMCH(dev_priv)) {
3571                 return 32*1024;
3572         } else if (INTEL_GEN(dev_priv) >= 4) {
3573                 if (modifier == I915_FORMAT_MOD_X_TILED)
3574                         return 16*1024;
3575                 else
3576                         return 32*1024;
3577         } else if (INTEL_GEN(dev_priv) >= 3) {
3578                 if (modifier == I915_FORMAT_MOD_X_TILED)
3579                         return 8*1024;
3580                 else
3581                         return 16*1024;
3582         } else {
3583                 if (plane->i9xx_plane == PLANE_C)
3584                         return 4*1024;
3585                 else
3586                         return 8*1024;
3587         }
3588 }
3589
/*
 * Compute the DSPCNTR bits that depend on crtc state rather than plane
 * state: pipe gamma/csc enables and, on gen < 5, the pipe selection.
 */
static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dspcntr = 0;

	if (crtc_state->gamma_enable)
		dspcntr |= DISPPLANE_GAMMA_ENABLE;

	if (crtc_state->csc_enable)
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* Planes can be assigned to either pipe on gen < 5. */
	if (INTEL_GEN(dev_priv) < 5)
		dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);

	return dspcntr;
}
3607
/*
 * Compute the plane-state dependent DSPCNTR bits: enable bit, pixel
 * format, tiling and rotation/mirroring.  Returns 0 on an unsupported
 * pixel format (which also trips MISSING_CASE).
 */
static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	u32 dspcntr;

	dspcntr = DISPLAY_PLANE_ENABLE;

	/* Trickle feed is disabled on these platforms only. */
	if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
	    IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	/* Map the DRM fourcc to the hardware pixel format field. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		MISSING_CASE(fb->format->format);
		return 0;
	}

	/* X tiling is the only tiled layout these planes support (gen4+). */
	if (INTEL_GEN(dev_priv) >= 4 &&
	    fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;

	if (rotation & DRM_MODE_ROTATE_180)
		dspcntr |= DISPPLANE_ROTATE_180;

	if (rotation & DRM_MODE_REFLECT_X)
		dspcntr |= DISPPLANE_MIRROR;

	return dspcntr;
}
3662
/*
 * Compute the final offset/x/y of a pre-SKL style primary plane.
 * Gen4+ can use an aligned surface offset; older hardware programs
 * the raw linear address (offset stays 0 and x/y carry everything).
 */
int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	int src_x, src_y;
	u32 offset;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	if (!plane_state->base.visible)
		return 0;

	/* src coordinates are 16.16 fixed point. */
	src_x = plane_state->base.src.x1 >> 16;
	src_y = plane_state->base.src.y1 >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);

	if (INTEL_GEN(dev_priv) >= 4)
		offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
							    plane_state, 0);
	else
		offset = 0;

	/*
	 * Put the final coordinates back so that the src
	 * coordinate checks will see the right values.
	 */
	drm_rect_translate(&plane_state->base.src,
			   (src_x << 16) - plane_state->base.src.x1,
			   (src_y << 16) - plane_state->base.src.y1);

	/* HSW/BDW do this automagically in hardware */
	if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
		unsigned int rotation = plane_state->base.rotation;
		int src_w = drm_rect_width(&plane_state->base.src) >> 16;
		int src_h = drm_rect_height(&plane_state->base.src) >> 16;

		/*
		 * With 180° rotation / X mirroring the hardware scans out
		 * from the far corner, so point at the last pixel(s).
		 */
		if (rotation & DRM_MODE_ROTATE_180) {
			src_x += src_w - 1;
			src_y += src_h - 1;
		} else if (rotation & DRM_MODE_REFLECT_X) {
			src_x += src_w - 1;
		}
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;

	return 0;
}
3717
/*
 * Check hook for the i9xx primary planes: validate rotation and
 * plane position (no scaling supported), compute the surface layout,
 * and precompute the DSPCNTR value for the update phase.
 */
static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
		 struct intel_plane_state *plane_state)
{
	int ret;

	ret = chv_plane_check_rotation(plane_state);
	if (ret)
		return ret;

	/* These planes cannot scale, hence NO_SCALING for both limits. */
	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
						  &crtc_state->base,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, true);
	if (ret)
		return ret;

	ret = i9xx_check_plane_surface(plane_state);
	if (ret)
		return ret;

	if (!plane_state->base.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);

	return 0;
}
3751
/*
 * Program and (re)enable an i9xx-style primary plane from the
 * precomputed plane state.  All register writes happen under the
 * uncore lock; the surface address is written last since it arms the
 * double-buffered update.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	int x = plane_state->color_plane[0].x;
	int y = plane_state->color_plane[0].y;
	unsigned long irqflags;
	u32 dspaddr_offset;
	u32 dspcntr;

	/* Combine the precomputed plane bits with the crtc-dependent ones. */
	dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* Gen4+ programs an aligned offset; older hw uses the linear address. */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->color_plane[0].offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);

	if (INTEL_GEN(dev_priv) < 4) {
		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
		I915_WRITE_FW(DSPSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		/* CHV plane B has its own position/size/alpha registers. */
		I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
	}

	/*
	 * The control register self-arms if the plane was previously
	 * disabled. Try to make the plane enable atomic by writing
	 * the control register just before the surface register.
	 */
	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3818
/*
 * Disable an i9xx-style primary plane, while keeping the crtc-level
 * DSPCNTR bits (gamma/csc) programmed — see the comment below.
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;
	u32 dspcntr;

	/*
	 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
	 * enable on ilk+ affect the pipe bottom color as
	 * well, so we must configure them even if the plane
	 * is disabled.
	 *
	 * On pre-g4x there is no way to gamma correct the
	 * pipe bottom color but we'll keep on doing this
	 * anyway so that the crtc state readout works correctly.
	 */
	dspcntr = i9xx_plane_ctl_crtc(crtc_state);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Write the (now enable-bit-less) control, then zero the surface. */
	I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3849
/*
 * i9xx_plane_get_hw_state - read out the current hardware state of a plane
 * @plane: plane to query
 * @pipe: returns the pipe the plane is currently assigned to
 *
 * Returns true if the plane enable bit is set in DSPCNTR. Returns false
 * (plane treated as disabled) when the pipe power well is not enabled.
 * On gen5+ @pipe comes from the software plane->pipe mapping; on older
 * parts it is read from the DSPCNTR pipe select bits.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = I915_READ(DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
3884
3885 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3886 {
3887         struct drm_device *dev = intel_crtc->base.dev;
3888         struct drm_i915_private *dev_priv = to_i915(dev);
3889
3890         I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
3891         I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
3892         I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
3893 }
3894
3895 /*
3896  * This function detaches (aka. unbinds) unused scalers in hardware
3897  */
3898 static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
3899 {
3900         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3901         const struct intel_crtc_scaler_state *scaler_state =
3902                 &crtc_state->scaler_state;
3903         int i;
3904
3905         /* loop through and disable scalers that aren't in use */
3906         for (i = 0; i < intel_crtc->num_scalers; i++) {
3907                 if (!scaler_state->scalers[i].in_use)
3908                         skl_detach_scaler(intel_crtc, i);
3909         }
3910 }
3911
3912 static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
3913                                           int color_plane, unsigned int rotation)
3914 {
3915         /*
3916          * The stride is either expressed as a multiple of 64 bytes chunks for
3917          * linear buffers or in number of tiles for tiled buffers.
3918          */
3919         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3920                 return 64;
3921         else if (drm_rotation_90_or_270(rotation))
3922                 return intel_tile_height(fb, color_plane);
3923         else
3924                 return intel_tile_width_bytes(fb, color_plane);
3925 }
3926
3927 u32 skl_plane_stride(const struct intel_plane_state *plane_state,
3928                      int color_plane)
3929 {
3930         const struct drm_framebuffer *fb = plane_state->base.fb;
3931         unsigned int rotation = plane_state->base.rotation;
3932         u32 stride = plane_state->color_plane[color_plane].stride;
3933
3934         if (color_plane >= fb->format->num_planes)
3935                 return 0;
3936
3937         return stride / skl_plane_stride_mult(fb, color_plane, rotation);
3938 }
3939
/*
 * Translate a DRM fourcc pixel format into the PLANE_CTL format field
 * (plus RGB/BGR ordering bits where needed) for SKL+ universal planes.
 * Unsupported formats trigger MISSING_CASE() and return 0.
 */
static u32 skl_plane_ctl_format(u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		return PLANE_CTL_FORMAT_XRGB_16161616F;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	case DRM_FORMAT_NV12:
		return PLANE_CTL_FORMAT_NV12;
	case DRM_FORMAT_P010:
		return PLANE_CTL_FORMAT_P010;
	case DRM_FORMAT_P012:
		return PLANE_CTL_FORMAT_P012;
	case DRM_FORMAT_P016:
		return PLANE_CTL_FORMAT_P016;
	case DRM_FORMAT_Y210:
		return PLANE_CTL_FORMAT_Y210;
	case DRM_FORMAT_Y212:
		return PLANE_CTL_FORMAT_Y212;
	case DRM_FORMAT_Y216:
		return PLANE_CTL_FORMAT_Y216;
	case DRM_FORMAT_XVYU2101010:
		return PLANE_CTL_FORMAT_Y410;
	case DRM_FORMAT_XVYU12_16161616:
		return PLANE_CTL_FORMAT_Y412;
	case DRM_FORMAT_XVYU16161616:
		return PLANE_CTL_FORMAT_Y416;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}
3997
/*
 * Map the plane's pixel blend mode to the PLANE_CTL alpha bits
 * (pre-GLK platforms; GLK+ use glk_plane_color_ctl_alpha() instead).
 * Formats without an alpha channel always disable alpha blending.
 */
static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
{
	if (!plane_state->base.fb->format->has_alpha)
		return PLANE_CTL_ALPHA_DISABLE;

	switch (plane_state->base.pixel_blend_mode) {
	case DRM_MODE_BLEND_PIXEL_NONE:
		return PLANE_CTL_ALPHA_DISABLE;
	case DRM_MODE_BLEND_PREMULTI:
		return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_MODE_BLEND_COVERAGE:
		return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
	default:
		/* unknown blend mode: warn and fall back to no blending */
		MISSING_CASE(plane_state->base.pixel_blend_mode);
		return PLANE_CTL_ALPHA_DISABLE;
	}
}
4015
/*
 * GLK+ counterpart of skl_plane_ctl_alpha(): map the plane's pixel
 * blend mode to the PLANE_COLOR_CTL alpha bits. Formats without an
 * alpha channel always disable alpha blending.
 */
static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
{
	if (!plane_state->base.fb->format->has_alpha)
		return PLANE_COLOR_ALPHA_DISABLE;

	switch (plane_state->base.pixel_blend_mode) {
	case DRM_MODE_BLEND_PIXEL_NONE:
		return PLANE_COLOR_ALPHA_DISABLE;
	case DRM_MODE_BLEND_PREMULTI:
		return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
	case DRM_MODE_BLEND_COVERAGE:
		return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
	default:
		/* unknown blend mode: warn and fall back to no blending */
		MISSING_CASE(plane_state->base.pixel_blend_mode);
		return PLANE_COLOR_ALPHA_DISABLE;
	}
}
4033
/*
 * Translate a framebuffer modifier into the PLANE_CTL tiling bits,
 * enabling render decompression for the CCS modifiers. Linear maps
 * to 0; unknown modifiers trigger MISSING_CASE() and also return 0.
 */
static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}
4055
/*
 * Translate a DRM rotation value into the PLANE_CTL rotation bits.
 * Note the 90/270 swap documented below.
 */
static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
	switch (rotate) {
	case DRM_MODE_ROTATE_0:
		break;
	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, thats why this swapping.
	 */
	case DRM_MODE_ROTATE_90:
		return PLANE_CTL_ROTATE_270;
	case DRM_MODE_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_MODE_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotate);
	}

	return 0;
}
4077
/*
 * Translate a DRM reflection value into the PLANE_CTL flip bits
 * (gen10+ only). Only horizontal flip is supported; DRM_MODE_REFLECT_Y
 * deliberately falls through to MISSING_CASE().
 */
static u32 cnl_plane_ctl_flip(unsigned int reflect)
{
	switch (reflect) {
	case 0:
		break;
	case DRM_MODE_REFLECT_X:
		return PLANE_CTL_FLIP_HORIZONTAL;
	case DRM_MODE_REFLECT_Y:
	default:
		MISSING_CASE(reflect);
	}

	return 0;
}
4092
/*
 * Compute the CRTC-dependent PLANE_CTL bits (pipe gamma / pipe CSC).
 * On GLK and gen10+ these bits live in PLANE_COLOR_CTL instead (see
 * glk_plane_color_ctl_crtc()), so nothing is set here for those.
 */
u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	u32 plane_ctl = 0;

	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		return plane_ctl;

	if (crtc_state->gamma_enable)
		plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;

	if (crtc_state->csc_enable)
		plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;

	return plane_ctl;
}
4109
/*
 * Build the plane-dependent part of PLANE_CTL for a plane update:
 * enable bit, alpha/gamma/YUV CSC handling (pre-GLK only; GLK+ use
 * PLANE_COLOR_CTL for these), pixel format, tiling, rotation,
 * reflection (gen10+) and color keying.
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	/* pre-GLK color management bits; GLK+ moved these to PLANE_COLOR_CTL */
	if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
		plane_ctl |= skl_plane_ctl_alpha(plane_state);
		plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	/* reflection is only supported on gen10+ hardware */
	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}
4148
/*
 * Compute the CRTC-dependent PLANE_COLOR_CTL bits (pipe gamma / pipe
 * CSC) for GLK/CNL. On gen11+ these bits are not set here.
 */
u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	u32 plane_color_ctl = 0;

	if (INTEL_GEN(dev_priv) >= 11)
		return plane_color_ctl;

	if (crtc_state->gamma_enable)
		plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;

	if (crtc_state->csc_enable)
		plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;

	return plane_color_ctl;
}
4165
/*
 * Build the plane-dependent part of PLANE_COLOR_CTL (GLK+): plane
 * gamma disable, alpha blend mode, and YUV->RGB CSC selection. For
 * ICL HDR-capable planes only the input CSC enable bit is set here;
 * presumably their CSC coefficients are programmed elsewhere — the
 * explicit BT.601/BT.709 mode selection is the non-HDR path.
 */
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	u32 plane_color_ctl = 0;

	plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
	plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);

	if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
		if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
		else
			plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;

		if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
	}

	return plane_color_ctl;
}
4192
/*
 * __intel_display_resume - restore display state after a reset/resume
 * @dev: drm device
 * @state: duplicated atomic state to commit back, may be NULL
 * @ctx: modeset acquire context held by the caller
 *
 * Re-reads the hardware state and re-enables VGA redisable handling,
 * then (if @state was saved) commits the duplicated state, forcing a
 * full recalculation on every CRTC.
 *
 * Returns 0 on success or a negative error code; -EDEADLK here would
 * indicate a locking problem in the caller and is WARNed on.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	i915_redisable_vga(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	WARN_ON(ret == -EDEADLK);
	return ret;
}
4231
4232 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4233 {
4234         return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4235                 intel_has_gpu_reset(dev_priv));
4236 }
4237
/*
 * intel_prepare_reset - quiesce the display before a GPU reset
 * @dev_priv: i915 device
 *
 * If the reset will clobber the display (or the force-modeset test
 * parameter is set), duplicate the current atomic state, disable all
 * CRTCs and stash the state in dev_priv->modeset_restore_state for
 * intel_finish_reset() to restore.
 *
 * NOTE: on success AND on the error paths below, mode_config.mutex and
 * the modeset locks in @ctx are left held on purpose — they are
 * released by intel_finish_reset().
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
	wake_up_all(&dev_priv->gpu_error.wait_queue);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		i915_gem_set_wedged(dev_priv);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		/* deadlock with another ctx: back off and retry */
		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		/* locks stay held for intel_finish_reset() */
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		/* locks stay held for intel_finish_reset() */
		return;
	}

	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
4293
/*
 * intel_finish_reset - restore the display after a GPU reset
 * @dev_priv: i915 device
 *
 * Counterpart of intel_prepare_reset(): restores the stashed atomic
 * state (doing a full display re-init first when the reset clobbered
 * the display), then drops the modeset locks and mutex that
 * intel_prepare_reset() left held.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev);
		intel_init_clock_gating(dev_priv);

		/* re-program HPD interrupts lost in the reset */
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
}
4344
/*
 * Apply ICL PIPE_CHICKEN workaround bits. Done as a read-modify-write
 * so any other chicken bits already set are preserved.
 */
static void icl_set_pipe_chicken(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	tmp = I915_READ(PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
	I915_WRITE(PIPE_CHICKEN(pipe), tmp);
}
4368
/*
 * intel_update_pipe_config - update pipe size/panel fitter on a fastset
 * @old_crtc_state: previous crtc state (used to detect pfit disable)
 * @new_crtc_state: new crtc state being committed
 *
 * Called on the flip (non-full-modeset) path to keep PIPESRC and the
 * panel fitter / scaler state in sync with the new mode.
 */
static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
				     const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = new_crtc_state->base.mode;

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	I915_WRITE(PIPESRC(crtc->pipe),
		   ((new_crtc_state->pipe_src_w - 1) << 16) |
		   (new_crtc_state->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(new_crtc_state);

		if (new_crtc_state->pch_pfit.enabled)
			skylake_pfit_enable(new_crtc_state);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		if (new_crtc_state->pch_pfit.enabled)
			ironlake_pfit_enable(new_crtc_state);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(old_crtc_state);
	}

	if (INTEL_GEN(dev_priv) >= 11)
		icl_set_pipe_chicken(crtc);
}
4407
/*
 * intel_fdi_normal_train - switch the FDI link from training patterns
 * to the normal (idle/pixel) pattern on both the CPU TX and PCH RX
 * sides, once link training has completed.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
4448
/*
 * The FDI link training functions for ILK/Ibexpeak.
 *
 * Two-phase training: pattern 1 until bit lock, then pattern 2 until
 * symbol lock, polling FDI_RX_IIR up to 5 times per phase. Failures
 * are logged but not propagated.
 */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* poll for bit lock (phase 1 complete) */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			/* write back the bit to ack/clear it */
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* poll for symbol lock (phase 2 complete) */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
4542
/*
 * Voltage swing / pre-emphasis combinations tried in order (one per
 * training attempt) by the SNB/IVB FDI link training loops below.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
4549
/*
 * The FDI link training functions for SNB/Cougarpoint.
 *
 * Same two-phase (bit lock, then symbol lock) structure as the ILK
 * version, but each phase additionally iterates over the four
 * voltage-swing/pre-emphasis combinations in snb_b_fdi_train_param[],
 * polling FDI_RX_IIR up to 5 times per combination. Failures are
 * logged but not propagated.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* phase 1: try each vswing/pre-emphasis level until bit lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* write back the bit to ack/clear it */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN(dev_priv, 6)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* phase 2: try each vswing/pre-emphasis level until symbol lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
4682
/*
 * Manual link training for Ivy Bridge A0 parts.
 *
 * Runs the two-pattern FDI training sequence by hand: for each
 * vswing/pre-emphasis level in snb_b_fdi_train_param (each tried twice),
 * enable TX/RX with training pattern 1, poll for bit lock, then switch
 * to pattern 2 and poll for symbol lock. Bails out as soon as both
 * locks have been achieved.
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j counts attempts; j/2 indexes the vswing/emphasis table */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* Poll up to 4 times for bit lock (reads IIR twice per pass). */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				/* Write the lock bit back to clear/ack it. */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			/* No bit lock at this level, retry with the next one. */
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		/* Poll up to 4 times for symbol lock. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
4802
/*
 * Enable the FDI PLLs for the crtc's pipe: bring up the PCH FDI RX
 * PLL (with lane count and BPC programmed), switch the RX clock from
 * Rawclk to PCDclk, then enable the CPU FDI TX PLL if it isn't on yet.
 */
static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	/* Mirror the pipe's PIPECONF BPC bits into the FDI RX BPC field. */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
4839
/*
 * Tear down the FDI PLLs for the crtc's pipe: switch the RX clock back
 * from PCDclk to Rawclk, then disable the CPU FDI TX PLL and finally
 * the PCH FDI RX PLL, waiting for the clocks to settle after each step.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
4869
/*
 * Disable the FDI link on the crtc's pipe: turn off CPU FDI TX and
 * PCH FDI RX, apply the Ironlake clock-pointer workaround, and leave
 * both sides parked in training pattern 1.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* Keep the RX BPC field in sync with PIPECONF while disabling. */
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
4922
4923 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
4924 {
4925         struct drm_crtc *crtc;
4926         bool cleanup_done;
4927
4928         drm_for_each_crtc(crtc, &dev_priv->drm) {
4929                 struct drm_crtc_commit *commit;
4930                 spin_lock(&crtc->commit_lock);
4931                 commit = list_first_entry_or_null(&crtc->commit_list,
4932                                                   struct drm_crtc_commit, commit_entry);
4933                 cleanup_done = commit ?
4934                         try_wait_for_completion(&commit->cleanup_done) : true;
4935                 spin_unlock(&crtc->commit_lock);
4936
4937                 if (cleanup_done)
4938                         continue;
4939
4940                 drm_crtc_wait_one_vblank(crtc);
4941
4942                 return true;
4943         }
4944
4945         return false;
4946 }
4947
/*
 * Stop the iCLKIP clock: gate the pixel clock, then set the SSC
 * modulator's disable bit in SSCCTL6 over the sideband interface.
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* Sideband accesses must be serialized via sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
4962
/*
 * Program iCLKIP clock to the desired frequency.
 *
 * Computes the integer divisor, phase increment and auxiliary divider
 * from the mode's crtc_clock, writes them over the sideband interface,
 * re-enables the SSC modulator and finally ungates the pixel clock.
 */
static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* Make sure the modulator is stopped before reprogramming it. */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		/* Split the total divisor into integer part and PI phase. */
		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	/* Sideband accesses below must be serialized via sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
5041
/*
 * Read back the currently programmed iCLKIP frequency (in kHz) by
 * inverting the divisor math of lpt_program_iclkip(). Returns 0 when
 * the pixel clock is gated or the SSC modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	/* Pixel clock gated -> iCLKIP not running. */
	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	/* Extract the integer divisor and the PI phase increment. */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* Inverse of the decomposition done in lpt_program_iclkip(). */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
5078
/*
 * Copy the CPU transcoder's horizontal/vertical timing registers to
 * the given PCH transcoder so both ends run identical timings.
 */
static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
						enum pipe pch_transcoder)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
5102
/*
 * Set or clear the FDI B/C lane bifurcation bit in SOUTH_CHICKEN1.
 * The bit must only be flipped while both FDI B and FDI C receivers
 * are disabled (enforced with WARNs below).
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	/* Nothing to do if the bit already matches the requested state. */
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
5122
5123 static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
5124 {
5125         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5126         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5127
5128         switch (crtc->pipe) {
5129         case PIPE_A:
5130                 break;
5131         case PIPE_B:
5132                 if (crtc_state->fdi_lanes > 2)
5133                         cpt_set_fdi_bc_bifurcation(dev_priv, false);
5134                 else
5135                         cpt_set_fdi_bc_bifurcation(dev_priv, true);
5136
5137                 break;
5138         case PIPE_C:
5139                 cpt_set_fdi_bc_bifurcation(dev_priv, true);
5140
5141                 break;
5142         default:
5143                 BUG();
5144         }
5145 }
5146
5147 /*
5148  * Finds the encoder associated with the given CRTC. This can only be
5149  * used when we know that the CRTC isn't feeding multiple encoders!
5150  */
5151 static struct intel_encoder *
5152 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5153                            const struct intel_crtc_state *crtc_state)
5154 {
5155         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5156         const struct drm_connector_state *connector_state;
5157         const struct drm_connector *connector;
5158         struct intel_encoder *encoder = NULL;
5159         int num_encoders = 0;
5160         int i;
5161
5162         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
5163                 if (connector_state->crtc != &crtc->base)
5164                         continue;
5165
5166                 encoder = to_intel_encoder(connector_state->best_encoder);
5167                 num_encoders++;
5168         }
5169
5170         WARN(num_encoders != 1, "%d encoders for pipe %c\n",
5171              num_encoders, pipe_name(crtc->pipe));
5172
5173         return encoder;
5174 }
5175
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(const struct intel_atomic_state *state,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev_priv))
		ivybridge_update_fdi_bc_bifurcation(crtc_state);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc, crtc_state);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		/* Select DPLL B for this transcoder iff the state chose PLL B. */
		if (crtc_state->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(crtc_state);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(crtc_state, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) &&
	    intel_crtc_has_dp_encoder(crtc_state)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->base.adjusted_mode;
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		enum port port;

		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* Exactly one encoder feeds the CRTC here; expect port B..D. */
		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
		WARN_ON(port < PORT_B || port > PORT_D);
		temp |= TRANS_DP_PORT_SEL(port);

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(crtc_state);
}
5267
/*
 * LPT variant of the PCH enable sequence: program the iCLKIP clock,
 * copy the CPU transcoder timings, then enable the PCH transcoder.
 * LPT always uses the PIPE_A PCH transcoder.
 */
static void lpt_pch_enable(const struct intel_atomic_state *state,
			   const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, PIPE_A);

	lpt_program_iclkip(crtc_state);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
5284
/*
 * Verify the pipe is running after a modeset by checking that the
 * display scanline register (PIPEDSL) keeps advancing. Waits up to
 * 5 ms for it to change, retries once, then logs an error.
 */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
5298
5299 /*
5300  * The hardware phase 0.0 refers to the center of the pixel.
5301  * We want to start from the top/left edge which is phase
5302  * -0.5. That matches how the hardware calculates the scaling
5303  * factors (from top-left of the first pixel to bottom-right
5304  * of the last pixel, as opposed to the pixel centers).
5305  *
5306  * For 4:2:0 subsampled chroma planes we obviously have to
5307  * adjust that so that the chroma sample position lands in
5308  * the right spot.
5309  *
5310  * Note that for packed YCbCr 4:2:2 formats there is no way to
5311  * control chroma siting. The hardware simply replicates the
5312  * chroma samples for both of the luma samples, and thus we don't
5313  * actually get the expected MPEG2 chroma siting convention :(
5314  * The same behaviour is observed on pre-SKL platforms as well.
5315  *
5316  * Theory behind the formula (note that we ignore sub-pixel
5317  * source coordinates):
5318  * s = source sample position
5319  * d = destination sample position
5320  *
5321  * Downscaling 4:1:
5322  * -0.5
5323  * | 0.0
5324  * | |     1.5 (initial phase)
5325  * | |     |
5326  * v v     v
5327  * | s | s | s | s |
5328  * |       d       |
5329  *
5330  * Upscaling 1:4:
5331  * -0.5
5332  * | -0.375 (initial phase)
5333  * | |     0.0
5334  * | |     |
5335  * v v     v
5336  * |       s       |
5337  * | d | d | d | d |
5338  */
5339 u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
5340 {
5341         int phase = -0x8000;
5342         u16 trip = 0;
5343
5344         if (chroma_cosited)
5345                 phase += (sub - 1) * 0x8000 / sub;
5346
5347         phase += scale / (2 * sub);
5348
5349         /*
5350          * Hardware initial phase limited to [-0.5:1.5].
5351          * Since the max hardware scale factor is 3.0, we
5352          * should never actually excdeed 1.0 here.
5353          */
5354         WARN_ON(phase < -0x8000 || phase > 0x18000);
5355
5356         if (phase < 0)
5357                 phase = 0x10000 + phase;
5358         else
5359                 trip = PS_PHASE_TRIP;
5360
5361         return ((phase >> 2) & PS_PHASE_MASK) | trip;
5362 }
5363
5364 #define SKL_MIN_SRC_W 8
5365 #define SKL_MAX_SRC_W 4096
5366 #define SKL_MIN_SRC_H 8
5367 #define SKL_MAX_SRC_H 4096
5368 #define SKL_MIN_DST_W 8
5369 #define SKL_MAX_DST_W 4096
5370 #define SKL_MIN_DST_H 8
5371 #define SKL_MAX_DST_H 4096
5372 #define ICL_MAX_SRC_W 5120
5373 #define ICL_MAX_SRC_H 4096
5374 #define ICL_MAX_DST_W 5120
5375 #define ICL_MAX_DST_H 4096
5376 #define SKL_MIN_YUV_420_SRC_W 16
5377 #define SKL_MIN_YUV_420_SRC_H 16
5378
5379 static int
5380 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
5381                   unsigned int scaler_user, int *scaler_id,
5382                   int src_w, int src_h, int dst_w, int dst_h,
5383                   const struct drm_format_info *format, bool need_scaler)
5384 {
5385         struct intel_crtc_scaler_state *scaler_state =
5386                 &crtc_state->scaler_state;
5387         struct intel_crtc *intel_crtc =
5388                 to_intel_crtc(crtc_state->base.crtc);
5389         struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
5390         const struct drm_display_mode *adjusted_mode =
5391                 &crtc_state->base.adjusted_mode;
5392
5393         /*
5394          * Src coordinates are already rotated by 270 degrees for
5395          * the 90/270 degree plane rotation cases (to match the
5396          * GTT mapping), hence no need to account for rotation here.
5397          */
5398         if (src_w != dst_w || src_h != dst_h)
5399                 need_scaler = true;
5400
5401         /*
5402          * Scaling/fitting not supported in IF-ID mode in GEN9+
5403          * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
5404          * Once NV12 is enabled, handle it here while allocating scaler
5405          * for NV12.
5406          */
5407         if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
5408             need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5409                 DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
5410                 return -EINVAL;
5411         }
5412
5413         /*
5414          * if plane is being disabled or scaler is no more required or force detach
5415          *  - free scaler binded to this plane/crtc
5416          *  - in order to do this, update crtc->scaler_usage
5417          *
5418          * Here scaler state in crtc_state is set free so that
5419          * scaler can be assigned to other user. Actual register
5420          * update to free the scaler is done in plane/panel-fit programming.
5421          * For this purpose crtc/plane_state->scaler_id isn't reset here.
5422          */
5423         if (force_detach || !need_scaler) {
5424                 if (*scaler_id >= 0) {
5425                         scaler_state->scaler_users &= ~(1 << scaler_user);
5426                         scaler_state->scalers[*scaler_id].in_use = 0;
5427
5428                         DRM_DEBUG_KMS("scaler_user index %u.%u: "
5429                                 "Staged freeing scaler id %d scaler_users = 0x%x\n",
5430                                 intel_crtc->pipe, scaler_user, *scaler_id,
5431                                 scaler_state->scaler_users);
5432                         *scaler_id = -1;
5433                 }
5434                 return 0;
5435         }
5436
5437         if (format && is_planar_yuv_format(format->format) &&
5438             (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
5439                 DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
5440                 return -EINVAL;
5441         }
5442
5443         /* range checks */
5444         if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
5445             dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
5446             (INTEL_GEN(dev_priv) >= 11 &&
5447              (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
5448               dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
5449             (INTEL_GEN(dev_priv) < 11 &&
5450              (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
5451               dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
5452                 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
5453                         "size is out of scaler range\n",
5454                         intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
5455                 return -EINVAL;
5456         }
5457
5458         /* mark this plane as a scaler user in crtc_state */
5459         scaler_state->scaler_users |= (1 << scaler_user);
5460         DRM_DEBUG_KMS("scaler_user index %u.%u: "
5461                 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
5462                 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
5463                 scaler_state->scaler_users);
5464
5465         return 0;
5466 }
5467
/**
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
 *
 * @state: crtc state providing the pipe source size, the adjusted mode and
 *         the scaler state to be updated
 *
 * Return
 *     0 - scaler_usage updated successfully
 *    error - requested scaling cannot be supported or other error condition
 */
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
	bool need_scaler = false;

	/* YCbCr 4:2:0 output always requires the pipe scaler. */
	if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		need_scaler = true;

	/*
	 * Stage pipe-source -> active-display scaling; request a detach
	 * instead when the crtc is not active.
	 */
	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
				 &state->scaler_state.scaler_id,
				 state->pipe_src_w, state->pipe_src_h,
				 adjusted_mode->crtc_hdisplay,
				 adjusted_mode->crtc_vdisplay, NULL, need_scaler);
}
5491
/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 * @crtc_state: crtc's scaler state
 * @plane_state: atomic plane state to update
 *
 * Return
 *     0 - scaler_usage updated successfully
 *    error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{
	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
	struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;
	/* Without an fb, or when invisible, any staged scaler must be freed. */
	bool force_detach = !fb || !plane_state->base.visible;
	bool need_scaler = false;

	/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
	if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
	    fb && is_planar_yuv_format(fb->format->format))
		need_scaler = true;

	/* Plane src coordinates are 16.16 fixed point, dst is in pixels. */
	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				drm_rect_width(&plane_state->base.src) >> 16,
				drm_rect_height(&plane_state->base.src) >> 16,
				drm_rect_width(&plane_state->base.dst),
				drm_rect_height(&plane_state->base.dst),
				fb ? fb->format : NULL, need_scaler);

	/* Nothing to validate if staging failed or no scaler is in use. */
	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
			      intel_plane->base.base.id,
			      intel_plane->base.name);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->format->format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_P010:
	case DRM_FORMAT_P012:
	case DRM_FORMAT_P016:
	case DRM_FORMAT_Y210:
	case DRM_FORMAT_Y212:
	case DRM_FORMAT_Y216:
	case DRM_FORMAT_XVYU2101010:
	case DRM_FORMAT_XVYU12_16161616:
	case DRM_FORMAT_XVYU16161616:
		break;
	default:
		/* Anything not listed above cannot go through the scaler. */
		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
			      intel_plane->base.base.id, intel_plane->base.name,
			      fb->base.id, fb->format->format);
		return -EINVAL;
	}

	return 0;
}
5574
5575 static void skylake_scaler_disable(struct intel_crtc *crtc)
5576 {
5577         int i;
5578
5579         for (i = 0; i < crtc->num_scalers; i++)
5580                 skl_detach_scaler(crtc, i);
5581 }
5582
/* Program a pipe scaler to act as the panel fitter for @crtc_state. */
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	const struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;

	if (crtc_state->pch_pfit.enabled) {
		u16 uv_rgb_hphase, uv_rgb_vphase;
		int pfit_w, pfit_h, hscale, vscale;
		int id;

		/* Panel fitting requires a scaler to have been reserved. */
		if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
			return;

		/* pch_pfit.size packs width in bits 31:16, height in 15:0. */
		pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
		pfit_h = crtc_state->pch_pfit.size & 0xFFFF;

		/* .16 fixed-point src/dst ratios used for phase computation. */
		hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
		vscale = (crtc_state->pipe_src_h << 16) / pfit_h;

		uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
		uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		/*
		 * NOTE(review): the phase registers use the raw _FW accessor
		 * while the other writes here go through I915_WRITE — confirm
		 * this mix is intentional.
		 */
		I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
		I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
			      PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
	}
}
5619
/* Enable and program the ILK-style panel fitter for this pipe. */
static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int pipe = crtc->pipe;

	if (crtc_state->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
			/* IVB/HSW carry the pipe selection in PF_CTL itself. */
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		/* Program the fitter window position and size. */
		I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
	}
}
5640
/* Enable IPS (intermediate pixel storage) if the crtc state asks for it. */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	if (IS_BROADWELL(dev_priv)) {
		/* On BDW IPS is toggled through the pcode mailbox. */
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
						IPS_ENABLE | IPS_PCODE_CONTROL));
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		/* Other platforms (HSW): write the MMIO register directly. */
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_wait_for_register(&dev_priv->uncore,
					    IPS_CTL, IPS_ENABLE, IPS_ENABLE,
					    50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
5678
/* Disable IPS and wait until the plane can safely be turned off. */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc_state->ips_enabled)
		return;

	if (IS_BROADWELL(dev_priv)) {
		/* On BDW IPS is toggled through the pcode mailbox. */
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_wait_for_register(&dev_priv->uncore,
					    IPS_CTL, IPS_ENABLE, 0,
					    100))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		/* The posting read flushes the disable before we continue. */
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}
5707
5708 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5709 {
5710         if (intel_crtc->overlay) {
5711                 struct drm_device *dev = intel_crtc->base.dev;
5712
5713                 mutex_lock(&dev->struct_mutex);
5714                 (void) intel_overlay_switch_off(intel_crtc->overlay);
5715                 mutex_unlock(&dev->struct_mutex);
5716         }
5717
5718         /* Let userspace switch the overlay on again. In most cases userspace
5719          * has to recompute where to put it anyway.
5720          */
5721 }
5722
5723 /**
5724  * intel_post_enable_primary - Perform operations after enabling primary plane
5725  * @crtc: the CRTC whose primary plane was just enabled
5726  * @new_crtc_state: the enabling state
5727  *
5728  * Performs potentially sleeping operations that must be done after the primary
5729  * plane is enabled, such as updating FBC and IPS.  Note that this may be
5730  * called due to an explicit primary plane update, or due to an implicit
5731  * re-enable that is caused when a sprite plane is updated to no longer
5732  * completely hide the primary plane.
5733  */
5734 static void
5735 intel_post_enable_primary(struct drm_crtc *crtc,
5736                           const struct intel_crtc_state *new_crtc_state)
5737 {
5738         struct drm_device *dev = crtc->dev;
5739         struct drm_i915_private *dev_priv = to_i915(dev);
5740         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5741         int pipe = intel_crtc->pipe;
5742
5743         /*
5744          * Gen2 reports pipe underruns whenever all planes are disabled.
5745          * So don't enable underrun reporting before at least some planes
5746          * are enabled.
5747          * FIXME: Need to fix the logic to work when we turn off all planes
5748          * but leave the pipe running.
5749          */
5750         if (IS_GEN(dev_priv, 2))
5751                 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5752
5753         /* Underruns don't always raise interrupts, so check manually. */
5754         intel_check_cpu_fifo_underruns(dev_priv);
5755         intel_check_pch_fifo_underruns(dev_priv);
5756 }
5757
5758 /* FIXME get rid of this and use pre_plane_update */
5759 static void
5760 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
5761 {
5762         struct drm_device *dev = crtc->dev;
5763         struct drm_i915_private *dev_priv = to_i915(dev);
5764         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5765         int pipe = intel_crtc->pipe;
5766
5767         /*
5768          * Gen2 reports pipe underruns whenever all planes are disabled.
5769          * So disable underrun reporting before all the planes get disabled.
5770          */
5771         if (IS_GEN(dev_priv, 2))
5772                 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5773
5774         hsw_disable_ips(to_intel_crtc_state(crtc->state));
5775
5776         /*
5777          * Vblank time updates from the shadow to live plane control register
5778          * are blocked if the memory self-refresh mode is active at that
5779          * moment. So to make sure the plane gets truly disabled, disable
5780          * first the self-refresh mode. The self-refresh enable bit in turn
5781          * will be checked/applied by the HW only at the next frame start
5782          * event which is after the vblank start event, so we need to have a
5783          * wait-for-vblank between disabling the plane and the pipe.
5784          */
5785         if (HAS_GMCH(dev_priv) &&
5786             intel_set_memory_cxsr(dev_priv, false))
5787                 intel_wait_for_vblank(dev_priv, pipe);
5788 }
5789
5790 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5791                                        const struct intel_crtc_state *new_crtc_state)
5792 {
5793         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5794         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5795
5796         if (!old_crtc_state->ips_enabled)
5797                 return false;
5798
5799         if (needs_modeset(new_crtc_state))
5800                 return true;
5801
5802         /*
5803          * Workaround : Do not read or write the pipe palette/gamma data while
5804          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5805          *
5806          * Disable IPS before we program the LUT.
5807          */
5808         if (IS_HASWELL(dev_priv) &&
5809             (new_crtc_state->base.color_mgmt_changed ||
5810              new_crtc_state->update_pipe) &&
5811             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5812                 return true;
5813
5814         return !new_crtc_state->ips_enabled;
5815 }
5816
5817 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5818                                        const struct intel_crtc_state *new_crtc_state)
5819 {
5820         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5821         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5822
5823         if (!new_crtc_state->ips_enabled)
5824                 return false;
5825
5826         if (needs_modeset(new_crtc_state))
5827                 return true;
5828
5829         /*
5830          * Workaround : Do not read or write the pipe palette/gamma data while
5831          * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5832          *
5833          * Re-enable IPS after the LUT has been programmed.
5834          */
5835         if (IS_HASWELL(dev_priv) &&
5836             (new_crtc_state->base.color_mgmt_changed ||
5837              new_crtc_state->update_pipe) &&
5838             new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5839                 return true;
5840
5841         /*
5842          * We can't read out IPS on broadwell, assume the worst and
5843          * forcibly enable IPS on the first fastset.
5844          */
5845         if (new_crtc_state->update_pipe &&
5846             old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5847                 return true;
5848
5849         return !old_crtc_state->ips_enabled;
5850 }
5851
5852 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5853                           const struct intel_crtc_state *crtc_state)
5854 {
5855         if (!crtc_state->nv12_planes)
5856                 return false;
5857
5858         /* WA Display #0827: Gen9:all */
5859         if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
5860                 return true;
5861
5862         return false;
5863 }
5864
5865 static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
5866                                const struct intel_crtc_state *crtc_state)
5867 {
5868         /* Wa_2006604312:icl */
5869         if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
5870                 return true;
5871
5872         return false;
5873 }
5874
/*
 * Post-commit tail work for a crtc: flush frontbuffer tracking, re-enable
 * IPS/FBC and optimized watermarks, and drop workarounds the new state no
 * longer needs.
 */
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = old_crtc_state->base.state;
	/* The new (post-commit) crtc state paired with @old_crtc_state. */
	struct intel_crtc_state *pipe_config =
		intel_atomic_get_new_crtc_state(to_intel_atomic_state(state),
						crtc);
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_primary_state =
		drm_atomic_get_old_plane_state(state, primary);

	intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

	if (pipe_config->update_wm_post && pipe_config->base.active)
		intel_update_watermarks(crtc);

	if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
		hsw_enable_ips(pipe_config);

	if (old_primary_state) {
		struct drm_plane_state *new_primary_state =
			drm_atomic_get_new_plane_state(state, primary);

		intel_fbc_post_update(crtc);

		/* The primary plane just became visible (or was modeset on). */
		if (new_primary_state->visible &&
		    (needs_modeset(pipe_config) ||
		     !old_primary_state->visible))
			intel_post_enable_primary(&crtc->base, pipe_config);
	}

	/* Display WA #0827: turn off once no longer required. */
	if (needs_nv12_wa(dev_priv, old_crtc_state) &&
	    !needs_nv12_wa(dev_priv, pipe_config))
		skl_wa_827(dev_priv, crtc->pipe, false);

	/* Wa_2006604312:icl: likewise. */
	if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
	    !needs_scalerclk_wa(dev_priv, pipe_config))
		icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
}
5916
/*
 * Pre-commit preparation for a crtc: disable IPS/FBC where required, arm
 * workarounds the new state needs, flush self-refresh, and program
 * intermediate watermarks that are safe for both old and new plane state.
 */
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
				   struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = old_crtc_state->base.state;
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_primary_state =
		drm_atomic_get_old_plane_state(state, primary);
	bool modeset = needs_modeset(pipe_config);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(state);

	/* Turn IPS off first when this transition requires it. */
	if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
		hsw_disable_ips(old_crtc_state);

	if (old_primary_state) {
		struct intel_plane_state *new_primary_state =
			intel_atomic_get_new_plane_state(intel_state,
							 to_intel_plane(primary));

		intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
		/*
		 * Gen2 reports pipe underruns whenever all planes are disabled.
		 * So disable underrun reporting before all the planes get disabled.
		 */
		if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
		    (modeset || !new_primary_state->base.visible))
			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
	}

	/* Display WA 827 */
	if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
	    needs_nv12_wa(dev_priv, pipe_config))
		skl_wa_827(dev_priv, crtc->pipe, true);

	/* Wa_2006604312:icl */
	if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
	    needs_scalerclk_wa(dev_priv, pipe_config))
		icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
	    pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling.  LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
	    old_crtc_state->base.active)
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * If we're doing a modeset, we're done.  No need to do any pre-vblank
	 * watermark programming here.
	 */
	if (needs_modeset(pipe_config))
		return;

	/*
	 * For platforms that support atomic watermarks, program the
	 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
	 * will be the intermediate values that are safe for both pre- and
	 * post- vblank; when vblank happens, the 'active' values will be set
	 * to the final 'target' values and we'll do this again to get the
	 * optimal watermarks.  For gen9+ platforms, the values we program here
	 * will be the final target values which will get automatically latched
	 * at vblank time; no further programming will be necessary.
	 *
	 * If a platform hasn't been transitioned to atomic watermarks yet,
	 * we'll continue to update watermarks the old way, if flags tell
	 * us to.
	 */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(intel_state,
						     pipe_config);
	else if (pipe_config->update_wm_pre)
		intel_update_watermarks(crtc);
}
6010
6011 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
6012                                       struct intel_crtc *crtc)
6013 {
6014         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6015         const struct intel_crtc_state *new_crtc_state =
6016                 intel_atomic_get_new_crtc_state(state, crtc);
6017         unsigned int update_mask = new_crtc_state->update_planes;
6018         const struct intel_plane_state *old_plane_state;
6019         struct intel_plane *plane;
6020         unsigned fb_bits = 0;
6021         int i;
6022
6023         intel_crtc_dpms_overlay_disable(crtc);
6024
6025         for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
6026                 if (crtc->pipe != plane->pipe ||
6027                     !(update_mask & BIT(plane->id)))
6028                         continue;
6029
6030                 intel_disable_plane(plane, new_crtc_state);
6031
6032                 if (old_plane_state->base.visible)
6033                         fb_bits |= plane->frontbuffer_bit;
6034         }
6035
6036         intel_frontbuffer_flip(dev_priv, fb_bits);
6037 }
6038
6039 static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc,
6040                                           struct intel_crtc_state *crtc_state,
6041                                           struct intel_atomic_state *state)
6042 {
6043         struct drm_connector_state *conn_state;
6044         struct drm_connector *conn;
6045         int i;
6046
6047         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6048                 struct intel_encoder *encoder =
6049                         to_intel_encoder(conn_state->best_encoder);
6050
6051                 if (conn_state->crtc != &crtc->base)
6052                         continue;
6053
6054                 if (encoder->pre_pll_enable)
6055                         encoder->pre_pll_enable(encoder, crtc_state, conn_state);
6056         }
6057 }
6058
6059 static void intel_encoders_pre_enable(struct intel_crtc *crtc,
6060                                       struct intel_crtc_state *crtc_state,
6061                                       struct intel_atomic_state *state)
6062 {
6063         struct drm_connector_state *conn_state;
6064         struct drm_connector *conn;
6065         int i;
6066
6067         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6068                 struct intel_encoder *encoder =
6069                         to_intel_encoder(conn_state->best_encoder);
6070
6071                 if (conn_state->crtc != &crtc->base)
6072                         continue;
6073
6074                 if (encoder->pre_enable)
6075                         encoder->pre_enable(encoder, crtc_state, conn_state);
6076         }
6077 }
6078
6079 static void intel_encoders_enable(struct intel_crtc *crtc,
6080                                   struct intel_crtc_state *crtc_state,
6081                                   struct intel_atomic_state *state)
6082 {
6083         struct drm_connector_state *conn_state;
6084         struct drm_connector *conn;
6085         int i;
6086
6087         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6088                 struct intel_encoder *encoder =
6089                         to_intel_encoder(conn_state->best_encoder);
6090
6091                 if (conn_state->crtc != &crtc->base)
6092                         continue;
6093
6094                 if (encoder->enable)
6095                         encoder->enable(encoder, crtc_state, conn_state);
6096                 intel_opregion_notify_encoder(encoder, true);
6097         }
6098 }
6099
6100 static void intel_encoders_disable(struct intel_crtc *crtc,
6101                                    struct intel_crtc_state *old_crtc_state,
6102                                    struct intel_atomic_state *state)
6103 {
6104         struct drm_connector_state *old_conn_state;
6105         struct drm_connector *conn;
6106         int i;
6107
6108         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6109                 struct intel_encoder *encoder =
6110                         to_intel_encoder(old_conn_state->best_encoder);
6111
6112                 if (old_conn_state->crtc != &crtc->base)
6113                         continue;
6114
6115                 intel_opregion_notify_encoder(encoder, false);
6116                 if (encoder->disable)
6117                         encoder->disable(encoder, old_crtc_state, old_conn_state);
6118         }
6119 }
6120
6121 static void intel_encoders_post_disable(struct intel_crtc *crtc,
6122                                         struct intel_crtc_state *old_crtc_state,
6123                                         struct intel_atomic_state *state)
6124 {
6125         struct drm_connector_state *old_conn_state;
6126         struct drm_connector *conn;
6127         int i;
6128
6129         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6130                 struct intel_encoder *encoder =
6131                         to_intel_encoder(old_conn_state->best_encoder);
6132
6133                 if (old_conn_state->crtc != &crtc->base)
6134                         continue;
6135
6136                 if (encoder->post_disable)
6137                         encoder->post_disable(encoder, old_crtc_state, old_conn_state);
6138         }
6139 }
6140
6141 static void intel_encoders_post_pll_disable(struct intel_crtc *crtc,
6142                                             struct intel_crtc_state *old_crtc_state,
6143                                             struct intel_atomic_state *state)
6144 {
6145         struct drm_connector_state *old_conn_state;
6146         struct drm_connector *conn;
6147         int i;
6148
6149         for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
6150                 struct intel_encoder *encoder =
6151                         to_intel_encoder(old_conn_state->best_encoder);
6152
6153                 if (old_conn_state->crtc != &crtc->base)
6154                         continue;
6155
6156                 if (encoder->post_pll_disable)
6157                         encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
6158         }
6159 }
6160
6161 static void intel_encoders_update_pipe(struct intel_crtc *crtc,
6162                                        struct intel_crtc_state *crtc_state,
6163                                        struct intel_atomic_state *state)
6164 {
6165         struct drm_connector_state *conn_state;
6166         struct drm_connector *conn;
6167         int i;
6168
6169         for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
6170                 struct intel_encoder *encoder =
6171                         to_intel_encoder(conn_state->best_encoder);
6172
6173                 if (conn_state->crtc != &crtc->base)
6174                         continue;
6175
6176                 if (encoder->update_pipe)
6177                         encoder->update_pipe(encoder, crtc_state, conn_state);
6178         }
6179 }
6180
/* Disable the primary plane of the pipe described by @crtc_state. */
static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct intel_plane *plane = to_intel_plane(crtc->base.primary);

        plane->disable_plane(plane, crtc_state);
}
6188
/*
 * Full mode-set enable sequence for ILK-style (pre-Haswell PCH) pipes.
 * The ordering of the steps below follows the hardware enable sequence
 * and must not be changed casually: FDI PLL before pipe, LUT load before
 * pipe start, PCH enable after the CPU pipe is running.
 */
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
                                 struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = pipe_config->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;

        if (WARN_ON(intel_crtc->active))
                return;

        /*
         * Sometimes spurious CPU pipe underruns happen during FDI
         * training, at least with VGA+HDMI cloning. Suppress them.
         *
         * On ILK we get an occasional spurious CPU pipe underruns
         * between eDP port A enable and vdd enable. Also PCH port
         * enable seems to result in the occasional CPU pipe underrun.
         *
         * Spurious PCH underruns also occur during PCH enabling.
         */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        if (pipe_config->has_pch_encoder)
                intel_prepare_shared_dpll(pipe_config);

        if (intel_crtc_has_dp_encoder(pipe_config))
                intel_dp_set_m_n(pipe_config, M1_N1);

        intel_set_pipe_timings(pipe_config);
        intel_set_pipe_src_size(pipe_config);

        /* FDI link needs its M/N values programmed on the CPU transcoder. */
        if (pipe_config->has_pch_encoder) {
                intel_cpu_transcoder_set_m_n(pipe_config,
                                             &pipe_config->fdi_m_n, NULL);
        }

        ironlake_set_pipeconf(pipe_config);

        intel_crtc->active = true;

        intel_encoders_pre_enable(intel_crtc, pipe_config, state);

        if (pipe_config->has_pch_encoder) {
                /* Note: FDI PLL enabling _must_ be done before we enable the
                 * cpu pipes, hence this is separate from all the other fdi/pch
                 * enabling. */
                ironlake_fdi_pll_enable(pipe_config);
        } else {
                assert_fdi_tx_disabled(dev_priv, pipe);
                assert_fdi_rx_disabled(dev_priv, pipe);
        }

        ironlake_pfit_enable(pipe_config);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(pipe_config);
        intel_color_commit(pipe_config);
        /* update DSPCNTR to configure gamma for pipe bottom color */
        intel_disable_primary_plane(pipe_config);

        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(state, pipe_config);
        intel_enable_pipe(pipe_config);

        /* PCH transcoder/port come up only after the CPU pipe is running. */
        if (pipe_config->has_pch_encoder)
                ironlake_pch_enable(state, pipe_config);

        assert_vblank_disabled(crtc);
        intel_crtc_vblank_on(pipe_config);

        intel_encoders_enable(intel_crtc, pipe_config, state);

        if (HAS_PCH_CPT(dev_priv))
                cpt_verify_modeset(dev, intel_crtc->pipe);

        /*
         * Must wait for vblank to avoid spurious PCH FIFO underruns.
         * And a second vblank wait is needed at least on ILK with
         * some interlaced HDMI modes. Let's do the double wait always
         * in case there are more corner cases we don't know about.
         */
        if (pipe_config->has_pch_encoder) {
                intel_wait_for_vblank(dev_priv, pipe);
                intel_wait_for_vblank(dev_priv, pipe);
        }
        /* Re-arm the underrun reporting suppressed at function entry. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6283
6284 /* IPS only exists on ULT machines and is tied to pipe A. */
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
        return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
}
6289
6290 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6291                                             enum pipe pipe, bool apply)
6292 {
6293         u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
6294         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6295
6296         if (apply)
6297                 val |= mask;
6298         else
6299                 val &= ~mask;
6300
6301         I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
6302 }
6303
/*
 * Program the ICL MBUS DBOX credits for @crtc's pipe. The specific
 * A/B/BW credit values are the recommended platform settings.
 */
static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        u32 val;

        val = MBUS_DBOX_A_CREDIT(2);
        val |= MBUS_DBOX_BW_CREDIT(1);
        val |= MBUS_DBOX_B_CREDIT(8);

        I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
}
6316
/*
 * Full mode-set enable sequence for HSW+ (DDI based) pipes, including
 * SKL/ICL additions. Step ordering follows the hardware modeset
 * sequence: PLL enable, transcoder/timings programming, pfit + LUT
 * with clocks running, then pipe enable and encoder enable.
 */
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
                                struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = pipe_config->base.crtc;
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe, hsw_workaround_pipe;
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        bool psl_clkgate_wa;

        if (WARN_ON(intel_crtc->active))
                return;

        intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);

        if (pipe_config->shared_dpll)
                intel_enable_shared_dpll(pipe_config);

        intel_encoders_pre_enable(intel_crtc, pipe_config, state);

        if (intel_crtc_has_dp_encoder(pipe_config))
                intel_dp_set_m_n(pipe_config, M1_N1);

        /* DSI transcoders program their own timings via the DSI code. */
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_set_pipe_timings(pipe_config);

        intel_set_pipe_src_size(pipe_config);

        if (cpu_transcoder != TRANSCODER_EDP &&
            !transcoder_is_dsi(cpu_transcoder)) {
                I915_WRITE(PIPE_MULT(cpu_transcoder),
                           pipe_config->pixel_multiplier - 1);
        }

        if (pipe_config->has_pch_encoder) {
                intel_cpu_transcoder_set_m_n(pipe_config,
                                             &pipe_config->fdi_m_n, NULL);
        }

        if (!transcoder_is_dsi(cpu_transcoder))
                haswell_set_pipeconf(pipe_config);

        if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
                bdw_set_pipemisc(pipe_config);

        intel_crtc->active = true;

        /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
        psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
                         pipe_config->pch_pfit.enabled;
        if (psl_clkgate_wa)
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

        if (INTEL_GEN(dev_priv) >= 9)
                skylake_pfit_enable(pipe_config);
        else
                ironlake_pfit_enable(pipe_config);

        /*
         * On ILK+ LUT must be loaded before the pipe is running but with
         * clocks enabled
         */
        intel_color_load_luts(pipe_config);
        intel_color_commit(pipe_config);
        /* update DSPCNTR to configure gamma/csc for pipe bottom color */
        if (INTEL_GEN(dev_priv) < 9)
                intel_disable_primary_plane(pipe_config);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_set_pipe_chicken(intel_crtc);

        intel_ddi_set_pipe_settings(pipe_config);
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_enable_transcoder_func(pipe_config);

        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(state, pipe_config);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_pipe_mbus_enable(intel_crtc);

        /* XXX: Do the pipe assertions at the right place for BXT DSI. */
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_enable_pipe(pipe_config);

        if (pipe_config->has_pch_encoder)
                lpt_pch_enable(state, pipe_config);

        if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
                intel_ddi_set_vc_payload_alloc(pipe_config, true);

        assert_vblank_disabled(crtc);
        intel_crtc_vblank_on(pipe_config);

        intel_encoders_enable(intel_crtc, pipe_config, state);

        /* The WA only needs to be held until the first vblank after enable. */
        if (psl_clkgate_wa) {
                intel_wait_for_vblank(dev_priv, pipe);
                glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
        }

        /* If we change the relative order between pipe/planes enabling, we need
         * to change the workaround. */
        hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
        if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
                intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
                intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
        }
}
6426
/* Disable the ILK-style panel fitter for the pipe in @old_crtc_state. */
static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;

        /* To avoid upsetting the power well on haswell only disable the pfit if
         * it's in use. The hw state code will make sure we get this right. */
        if (old_crtc_state->pch_pfit.enabled) {
                I915_WRITE(PF_CTL(pipe), 0);
                I915_WRITE(PF_WIN_POS(pipe), 0);
                I915_WRITE(PF_WIN_SZ(pipe), 0);
        }
}
6441
/*
 * Full mode-set disable sequence for ILK-style pipes: encoders first,
 * then the CPU pipe/pfit/FDI, then the PCH transcoder and its routing,
 * mirroring ironlake_crtc_enable() in reverse order.
 */
static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
                                  struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = old_crtc_state->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;

        /*
         * Sometimes spurious CPU pipe underruns happen when the
         * pipe is already disabled, but FDI RX/TX is still enabled.
         * Happens at least with VGA+HDMI cloning. Suppress them.
         */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

        intel_encoders_disable(intel_crtc, old_crtc_state, state);

        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);

        intel_disable_pipe(old_crtc_state);

        ironlake_pfit_disable(old_crtc_state);

        if (old_crtc_state->has_pch_encoder)
                ironlake_fdi_disable(crtc);

        intel_encoders_post_disable(intel_crtc, old_crtc_state, state);

        if (old_crtc_state->has_pch_encoder) {
                ironlake_disable_pch_transcoder(dev_priv, pipe);

                /* CPT routes DP/DPLL per transcoder; undo that routing. */
                if (HAS_PCH_CPT(dev_priv)) {
                        i915_reg_t reg;
                        u32 temp;

                        /* disable TRANS_DP_CTL */
                        reg = TRANS_DP_CTL(pipe);
                        temp = I915_READ(reg);
                        temp &= ~(TRANS_DP_OUTPUT_ENABLE |
                                  TRANS_DP_PORT_SEL_MASK);
                        temp |= TRANS_DP_PORT_SEL_NONE;
                        I915_WRITE(reg, temp);

                        /* disable DPLL_SEL */
                        temp = I915_READ(PCH_DPLL_SEL);
                        temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
                        I915_WRITE(PCH_DPLL_SEL, temp);
                }

                ironlake_fdi_pll_disable(intel_crtc);
        }

        /* Re-arm underrun reporting now that everything is down. */
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
6500
/*
 * Full mode-set disable sequence for HSW+ (DDI based) pipes; inverse
 * of haswell_crtc_enable(): encoders, pipe, transcoder, scaler/pfit,
 * then the post-disable/post-PLL-disable encoder hooks.
 */
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
                                 struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = old_crtc_state->base.crtc;
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

        intel_encoders_disable(intel_crtc, old_crtc_state, state);

        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);

        /* XXX: Do the pipe assertions at the right place for BXT DSI. */
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_disable_pipe(old_crtc_state);

        if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
                intel_ddi_set_vc_payload_alloc(old_crtc_state, false);

        if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_disable_transcoder_func(old_crtc_state);

        intel_dsc_disable(old_crtc_state);

        if (INTEL_GEN(dev_priv) >= 9)
                skylake_scaler_disable(intel_crtc);
        else
                ironlake_pfit_disable(old_crtc_state);

        intel_encoders_post_disable(intel_crtc, old_crtc_state, state);

        intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);
}
6535
/*
 * Program and enable the GMCH panel fitter for @crtc_state; no-op if
 * the state doesn't use the fitter.
 */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        if (!crtc_state->gmch_pfit.control)
                return;

        /*
         * The panel fitter should only be adjusted whilst the pipe is disabled,
         * according to register description and PRM.
         */
        WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
        assert_pipe_disabled(dev_priv, crtc->pipe);

        I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
        I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);

        /* Border color in case we don't scale up to the full screen. Black by
         * default, change to something else for debugging. */
        I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
6558
6559 bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
6560 {
6561         if (port == PORT_NONE)
6562                 return false;
6563
6564         if (IS_ELKHARTLAKE(dev_priv))
6565                 return port <= PORT_C;
6566
6567         if (INTEL_GEN(dev_priv) >= 11)
6568                 return port <= PORT_B;
6569
6570         return false;
6571 }
6572
6573 bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
6574 {
6575         if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
6576                 return port >= PORT_C && port <= PORT_F;
6577
6578         return false;
6579 }
6580
6581 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6582 {
6583         if (!intel_port_is_tc(dev_priv, port))
6584                 return PORT_TC_NONE;
6585
6586         return port - PORT_C;
6587 }
6588
/* Map a DDI port to the power domain covering its lanes. */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
        switch (port) {
        case PORT_A:
                return POWER_DOMAIN_PORT_DDI_A_LANES;
        case PORT_B:
                return POWER_DOMAIN_PORT_DDI_B_LANES;
        case PORT_C:
                return POWER_DOMAIN_PORT_DDI_C_LANES;
        case PORT_D:
                return POWER_DOMAIN_PORT_DDI_D_LANES;
        case PORT_E:
                return POWER_DOMAIN_PORT_DDI_E_LANES;
        case PORT_F:
                return POWER_DOMAIN_PORT_DDI_F_LANES;
        default:
                /* Unknown port: warn and fall back to a catch-all domain. */
                MISSING_CASE(port);
                return POWER_DOMAIN_PORT_OTHER;
        }
}
6609
/* Map a digital port's AUX channel to its AUX power domain. */
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
        switch (dig_port->aux_ch) {
        case AUX_CH_A:
                return POWER_DOMAIN_AUX_A;
        case AUX_CH_B:
                return POWER_DOMAIN_AUX_B;
        case AUX_CH_C:
                return POWER_DOMAIN_AUX_C;
        case AUX_CH_D:
                return POWER_DOMAIN_AUX_D;
        case AUX_CH_E:
                return POWER_DOMAIN_AUX_E;
        case AUX_CH_F:
                return POWER_DOMAIN_AUX_F;
        default:
                /* Unknown channel: warn and fall back to AUX A. */
                MISSING_CASE(dig_port->aux_ch);
                return POWER_DOMAIN_AUX_A;
        }
}
6631
/*
 * Compute the bitmask of display power domains needed for @crtc in
 * @crtc_state: the pipe and transcoder themselves, the panel fitter
 * when used, every attached encoder's domain, audio, and the display
 * core when a shared DPLL is in use. Returns 0 for an inactive crtc.
 */
static u64 get_crtc_power_domains(struct intel_crtc *crtc,
                                  struct intel_crtc_state *crtc_state)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_encoder *encoder;
        enum pipe pipe = crtc->pipe;
        u64 mask;
        enum transcoder transcoder = crtc_state->cpu_transcoder;

        if (!crtc_state->base.active)
                return 0;

        mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
        mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
        if (crtc_state->pch_pfit.enabled ||
            crtc_state->pch_pfit.force_thru)
                mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

        drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
                struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

                mask |= BIT_ULL(intel_encoder->power_domain);
        }

        if (HAS_DDI(dev_priv) && crtc_state->has_audio)
                mask |= BIT_ULL(POWER_DOMAIN_AUDIO);

        if (crtc_state->shared_dpll)
                mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);

        return mask;
}
6665
6666 static u64
6667 modeset_get_crtc_power_domains(struct intel_crtc *crtc,
6668                                struct intel_crtc_state *crtc_state)
6669 {
6670         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6671         enum intel_display_power_domain domain;
6672         u64 domains, new_domains, old_domains;
6673
6674         old_domains = crtc->enabled_power_domains;
6675         crtc->enabled_power_domains = new_domains =
6676                 get_crtc_power_domains(crtc, crtc_state);
6677
6678         domains = new_domains & ~old_domains;
6679
6680         for_each_power_domain(domain, domains)
6681                 intel_display_power_get(dev_priv, domain);
6682
6683         return old_domains & ~new_domains;
6684 }
6685
/* Drop a reference on every power domain set in @domains. */
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
                                      u64 domains)
{
        enum intel_display_power_domain domain;

        for_each_power_domain(domain, domains)
                intel_display_power_put_unchecked(dev_priv, domain);
}
6694
/*
 * Full mode-set enable sequence for VLV/CHV pipes. PLL prepare/enable
 * happens between the pre-PLL and pre-enable encoder hooks; ordering
 * follows the hardware sequence and must be preserved.
 */
static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
                                   struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = pipe_config->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;

        if (WARN_ON(intel_crtc->active))
                return;

        if (intel_crtc_has_dp_encoder(pipe_config))
                intel_dp_set_m_n(pipe_config, M1_N1);

        intel_set_pipe_timings(pipe_config);
        intel_set_pipe_src_size(pipe_config);

        /* CHV pipe B blender needs legacy mode and a zeroed canvas. */
        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
                I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
                I915_WRITE(CHV_CANVAS(pipe), 0);
        }

        i9xx_set_pipeconf(pipe_config);

        intel_crtc->active = true;

        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

        intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);

        if (IS_CHERRYVIEW(dev_priv)) {
                chv_prepare_pll(intel_crtc, pipe_config);
                chv_enable_pll(intel_crtc, pipe_config);
        } else {
                vlv_prepare_pll(intel_crtc, pipe_config);
                vlv_enable_pll(intel_crtc, pipe_config);
        }

        intel_encoders_pre_enable(intel_crtc, pipe_config, state);

        i9xx_pfit_enable(pipe_config);

        intel_color_load_luts(pipe_config);
        intel_color_commit(pipe_config);
        /* update DSPCNTR to configure gamma for pipe bottom color */
        intel_disable_primary_plane(pipe_config);

        dev_priv->display.initial_watermarks(state, pipe_config);
        intel_enable_pipe(pipe_config);

        assert_vblank_disabled(crtc);
        intel_crtc_vblank_on(pipe_config);

        intel_encoders_enable(intel_crtc, pipe_config, state);
}
6751
/* Program the precomputed FP0/FP1 PLL divider values for the pipe. */
static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
        I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
}
6760
/*
 * Full mode-set enable sequence for legacy i9xx (gen2-4) pipes:
 * dividers, timings, pipeconf, PLL, pfit/LUT, then pipe and encoders.
 */
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
                             struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = pipe_config->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;

        if (WARN_ON(intel_crtc->active))
                return;

        i9xx_set_pll_dividers(pipe_config);

        if (intel_crtc_has_dp_encoder(pipe_config))
                intel_dp_set_m_n(pipe_config, M1_N1);

        intel_set_pipe_timings(pipe_config);
        intel_set_pipe_src_size(pipe_config);

        i9xx_set_pipeconf(pipe_config);

        intel_crtc->active = true;

        /* gen2 has no FIFO underrun reporting. */
        if (!IS_GEN(dev_priv, 2))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

        intel_encoders_pre_enable(intel_crtc, pipe_config, state);

        i9xx_enable_pll(intel_crtc, pipe_config);

        i9xx_pfit_enable(pipe_config);

        intel_color_load_luts(pipe_config);
        intel_color_commit(pipe_config);
        /* update DSPCNTR to configure gamma for pipe bottom color */
        intel_disable_primary_plane(pipe_config);

        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(state,
                                                     pipe_config);
        else
                intel_update_watermarks(intel_crtc);
        intel_enable_pipe(pipe_config);

        assert_vblank_disabled(crtc);
        intel_crtc_vblank_on(pipe_config);

        intel_encoders_enable(intel_crtc, pipe_config, state);
}
6811
/* Disable the GMCH panel fitter; no-op if the old state didn't use it. */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        if (!old_crtc_state->gmch_pfit.control)
                return;

        /* The fitter may only be touched while the pipe is off. */
        assert_pipe_disabled(dev_priv, crtc->pipe);

        DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
                      I915_READ(PFIT_CONTROL));
        I915_WRITE(PFIT_CONTROL, 0);
}
6826
/*
 * Full mode-set disable sequence for legacy i9xx pipes, mirroring
 * i9xx_crtc_enable() in reverse: encoders, pipe, pfit, then the PLL.
 */
static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
                              struct intel_atomic_state *state)
{
        struct drm_crtc *crtc = old_crtc_state->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;

        /*
         * On gen2 planes are double buffered but the pipe isn't, so we must
         * wait for planes to fully turn off before disabling the pipe.
         */
        if (IS_GEN(dev_priv, 2))
                intel_wait_for_vblank(dev_priv, pipe);

        intel_encoders_disable(intel_crtc, old_crtc_state, state);

        drm_crtc_vblank_off(crtc);
        assert_vblank_disabled(crtc);

        intel_disable_pipe(old_crtc_state);

        i9xx_pfit_disable(old_crtc_state);

        intel_encoders_post_disable(intel_crtc, old_crtc_state, state);

        /* DSI drives the PLL itself; leave it alone in that case. */
        if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
                if (IS_CHERRYVIEW(dev_priv))
                        chv_disable_pll(dev_priv, pipe);
                else if (IS_VALLEYVIEW(dev_priv))
                        vlv_disable_pll(dev_priv, pipe);
                else
                        i9xx_disable_pll(old_crtc_state);
        }

        intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);

        if (!IS_GEN(dev_priv, 2))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

        if (!dev_priv->display.initial_watermarks)
                intel_update_watermarks(intel_crtc);

        /* clock the pipe down to 640x480@60 to potentially save power */
        if (IS_I830(dev_priv))
                i830_enable_pipe(dev_priv, pipe);
}
6875
/*
 * Forcibly shut down a crtc outside a normal atomic commit (used during
 * HW state sanitization/takeover). Disables all planes and the crtc via
 * the platform hook, then scrubs every piece of software state that
 * normally the atomic machinery would keep consistent: drm state masks,
 * encoder links, FBC, watermarks, shared DPLL, power domain refs, cdclk
 * and bandwidth bookkeeping.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
                                        struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_encoder *encoder;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_bw_state *bw_state =
                to_intel_bw_state(dev_priv->bw_obj.state);
        enum intel_display_power_domain domain;
        struct intel_plane *plane;
        u64 domains;
        struct drm_atomic_state *state;
        struct intel_crtc_state *crtc_state;
        int ret;

        if (!intel_crtc->active)
                return;

        /* Turn off every plane that is currently visible on this crtc. */
        for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
                const struct intel_plane_state *plane_state =
                        to_intel_plane_state(plane->base.state);

                if (plane_state->base.visible)
                        intel_plane_disable_noatomic(intel_crtc, plane);
        }

        /* Build a minimal atomic state just to call the disable hook. */
        state = drm_atomic_state_alloc(crtc->dev);
        if (!state) {
                DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
                              crtc->base.id, crtc->name);
                return;
        }

        state->acquire_ctx = ctx;

        /* Everything's already locked, -EDEADLK can't happen. */
        crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
        ret = drm_atomic_add_affected_connectors(state, crtc);

        WARN_ON(IS_ERR(crtc_state) || ret);

        dev_priv->display.crtc_disable(crtc_state, to_intel_atomic_state(state));

        drm_atomic_state_put(state);

        DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
                      crtc->base.id, crtc->name);

        /* Scrub the drm-level state so it reflects the disabled crtc. */
        WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
        crtc->state->active = false;
        intel_crtc->active = false;
        crtc->enabled = false;
        crtc->state->connector_mask = 0;
        crtc->state->encoder_mask = 0;

        for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
                encoder->base.crtc = NULL;

        intel_fbc_disable(intel_crtc);
        intel_update_watermarks(intel_crtc);
        intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));

        /* Release every power domain reference the crtc was holding. */
        domains = intel_crtc->enabled_power_domains;
        for_each_power_domain(domain, domains)
                intel_display_power_put_unchecked(dev_priv, domain);
        intel_crtc->enabled_power_domains = 0;

        dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
        dev_priv->min_cdclk[intel_crtc->pipe] = 0;
        dev_priv->min_voltage_level[intel_crtc->pipe] = 0;

        bw_state->data_rate[intel_crtc->pipe] = 0;
        bw_state->num_active_planes[intel_crtc->pipe] = 0;
}
6950
6951 /*
6952  * turn all crtc's off, but do not adjust state
6953  * This has to be paired with a call to intel_modeset_setup_hw_state.
6954  */
6955 int intel_display_suspend(struct drm_device *dev)
6956 {
6957         struct drm_i915_private *dev_priv = to_i915(dev);
6958         struct drm_atomic_state *state;
6959         int ret;
6960
6961         state = drm_atomic_helper_suspend(dev);
6962         ret = PTR_ERR_OR_ZERO(state);
6963         if (ret)
6964                 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6965         else
6966                 dev_priv->modeset_restore_state = state;
6967         return ret;
6968 }
6969
/* Tear down the drm encoder base and free the containing intel_encoder. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);

	kfree(to_intel_encoder(encoder));
}
6977
/*
 * Cross check the actual hw state with our own modeset state tracking (and
 * its internal consistency): a connector whose hardware reports enabled
 * must have an active attached crtc whose atomic state points back at the
 * same encoder and crtc; a disabled connector must not be referenced by an
 * active crtc or carry a stale best_encoder.
 */
static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;

		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		/* Without a crtc state the remaining checks can't run. */
		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->base.active,
		      "connector is active, but attached crtc isn't\n");

		/* Skip the encoder/crtc link checks when there is no
		 * encoder, or for DP MST outputs. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc_state && crtc_state->base.active,
			"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
7016
7017 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
7018 {
7019         if (crtc_state->base.enable && crtc_state->has_pch_encoder)
7020                 return crtc_state->fdi_lanes;
7021
7022         return 0;
7023 }
7024
/*
 * Validate the FDI lane configuration for @pipe. FDI lanes are a shared
 * resource on 3-pipe Ivybridge, so pipes B and C must cross-check against
 * each other's requirements.
 *
 * Returns 0 when the configuration is acceptable, -EINVAL when the lane
 * count cannot be supported, or the error from looking up the other
 * pipe's crtc state (e.g. -EDEADLK).
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	/* More than 4 lanes is never valid. */
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW are limited to 2 lanes. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* With only two pipes there is no lane sharing to check. */
	if (INTEL_INFO(dev_priv)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		/* Pipe B may use more than 2 lanes only if pipe C uses none. */
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* Pipe C is usable only if pipe B leaves it lanes (B <= 2). */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
7096
#define RETRY 1
/*
 * Compute the FDI link configuration (lane count and data/link M/N) for a
 * PCH-connected pipe.
 *
 * If the lane check fails with -EINVAL, the pipe bpp is lowered in steps
 * of 2 bits per component (not below 6 bpc) and the computation retried.
 * Returns 0 on success, RETRY when bpp was reduced and the caller must
 * recompute, -EDEADLK from the lane check, or -EINVAL when no workable
 * configuration exists.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false);

	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	/* Lock contention must be propagated untouched. */
	if (ret == -EDEADLK)
		return ret;

	/* Out of lanes: drop one bpc (2 bits * 3 components) and retry,
	 * stopping at 6 bpc. */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}
7145
7146 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
7147 {
7148         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7149         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7150
7151         /* IPS only exists on ULT machines and is tied to pipe A. */
7152         if (!hsw_crtc_supports_ips(crtc))
7153                 return false;
7154
7155         if (!i915_modparams.enable_ips)
7156                 return false;
7157
7158         if (crtc_state->pipe_bpp > 24)
7159                 return false;
7160
7161         /*
7162          * We compare against max which means we must take
7163          * the increased cdclk requirement into account when
7164          * calculating the new cdclk.
7165          *
7166          * Should measure whether using a lower cdclk w/o IPS
7167          */
7168         if (IS_BROADWELL(dev_priv) &&
7169             crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
7170                 return false;
7171
7172         return true;
7173 }
7174
7175 static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
7176 {
7177         struct drm_i915_private *dev_priv =
7178                 to_i915(crtc_state->base.crtc->dev);
7179         struct intel_atomic_state *intel_state =
7180                 to_intel_atomic_state(crtc_state->base.state);
7181
7182         if (!hsw_crtc_state_ips_capable(crtc_state))
7183                 return false;
7184
7185         /*
7186          * When IPS gets enabled, the pipe CRC changes. Since IPS gets
7187          * enabled and disabled dynamically based on package C states,
7188          * user space can't make reliable use of the CRCs, so let's just
7189          * completely disable it.
7190          */
7191         if (crtc_state->crc_enabled)
7192                 return false;
7193
7194         /* IPS should be fine as long as at least one plane is enabled. */
7195         if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
7196                 return false;
7197
7198         /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
7199         if (IS_BROADWELL(dev_priv) &&
7200             crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
7201                 return false;
7202
7203         return true;
7204 }
7205
7206 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
7207 {
7208         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7209
7210         /* GDG double wide on either pipe, otherwise pipe A only */
7211         return INTEL_GEN(dev_priv) < 4 &&
7212                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
7213 }
7214
7215 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
7216 {
7217         u32 pixel_rate;
7218
7219         pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
7220
7221         /*
7222          * We only use IF-ID interlacing. If we ever use
7223          * PF-ID we'll need to adjust the pixel_rate here.
7224          */
7225
7226         if (pipe_config->pch_pfit.enabled) {
7227                 u64 pipe_w, pipe_h, pfit_w, pfit_h;
7228                 u32 pfit_size = pipe_config->pch_pfit.size;
7229
7230                 pipe_w = pipe_config->pipe_src_w;
7231                 pipe_h = pipe_config->pipe_src_h;
7232
7233                 pfit_w = (pfit_size >> 16) & 0xFFFF;
7234                 pfit_h = pfit_size & 0xFFFF;
7235                 if (pipe_w < pfit_w)
7236                         pipe_w = pfit_w;
7237                 if (pipe_h < pfit_h)
7238                         pipe_h = pfit_h;
7239
7240                 if (WARN_ON(!pfit_w || !pfit_h))
7241                         return pixel_rate;
7242
7243                 pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
7244                                      pfit_w * pfit_h);
7245         }
7246
7247         return pixel_rate;
7248 }
7249
7250 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
7251 {
7252         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
7253
7254         if (HAS_GMCH(dev_priv))
7255                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
7256                 crtc_state->pixel_rate =
7257                         crtc_state->base.adjusted_mode.crtc_clock;
7258         else
7259                 crtc_state->pixel_rate =
7260                         ilk_pipe_pixel_rate(crtc_state);
7261 }
7262
/*
 * Validate and adjust the CRTC-level parts of @pipe_config: dotclock
 * limits (including pre-gen4 double wide mode), the YCbCr-vs-CTM
 * exclusion, even-width constraints, the zero hsync front porch
 * workaround, pixel rate bookkeeping and, for PCH encoders, the FDI
 * configuration.
 *
 * Returns 0 on success, a negative errno, or RETRY from the FDI code.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		/* Pre-gen4 single wide pipes are limited to 90% of cdclk. */
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	     pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
	     pipe_config->base.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev_priv)) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
7336
7337 static void
7338 intel_reduce_m_n_ratio(u32 *num, u32 *den)
7339 {
7340         while (*num > DATA_LINK_M_N_MASK ||
7341                *den > DATA_LINK_M_N_MASK) {
7342                 *num >>= 1;
7343                 *den >>= 1;
7344         }
7345 }
7346
7347 static void compute_m_n(unsigned int m, unsigned int n,
7348                         u32 *ret_m, u32 *ret_n,
7349                         bool constant_n)
7350 {
7351         /*
7352          * Several DP dongles in particular seem to be fussy about
7353          * too large link M/N values. Give N value as 0x8000 that
7354          * should be acceptable by specific devices. 0x8000 is the
7355          * specified fixed N value for asynchronous clock mode,
7356          * which the devices expect also in synchronous clock mode.
7357          */
7358         if (constant_n)
7359                 *ret_n = 0x8000;
7360         else
7361                 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7362
7363         *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
7364         intel_reduce_m_n_ratio(ret_m, ret_n);
7365 }
7366
7367 void
7368 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
7369                        int pixel_clock, int link_clock,
7370                        struct intel_link_m_n *m_n,
7371                        bool constant_n)
7372 {
7373         m_n->tu = 64;
7374
7375         compute_m_n(bits_per_pixel * pixel_clock,
7376                     link_clock * nlanes * 8,
7377                     &m_n->gmch_m, &m_n->gmch_n,
7378                     constant_n);
7379
7380         compute_m_n(pixel_clock, link_clock,
7381                     &m_n->link_m, &m_n->link_n,
7382                     constant_n);
7383 }
7384
7385 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7386 {
7387         if (i915_modparams.panel_use_ssc >= 0)
7388                 return i915_modparams.panel_use_ssc != 0;
7389         return dev_priv->vbt.lvds_use_ssc
7390                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7391 }
7392
7393 static u32 pnv_dpll_compute_fp(struct dpll *dpll)
7394 {
7395         return (1 << dpll->n) << 16 | dpll->m2;
7396 }
7397
7398 static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
7399 {
7400         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7401 }
7402
7403 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7404                                      struct intel_crtc_state *crtc_state,
7405                                      struct dpll *reduced_clock)
7406 {
7407         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7408         u32 fp, fp2 = 0;
7409
7410         if (IS_PINEVIEW(dev_priv)) {
7411                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7412                 if (reduced_clock)
7413                         fp2 = pnv_dpll_compute_fp(reduced_clock);
7414         } else {
7415                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7416                 if (reduced_clock)
7417                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
7418         }
7419
7420         crtc_state->dpll_hw_state.fp0 = fp;
7421
7422         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7423             reduced_clock) {
7424                 crtc_state->dpll_hw_state.fp1 = fp2;
7425         } else {
7426                 crtc_state->dpll_hw_state.fp1 = fp;
7427         }
7428 }
7429
/*
 * Recalibrate the PLL B opamp through a fixed DPIO read-modify-write
 * sequence.
 *
 * NOTE(review): called from vlv_prepare_pll() after vlv_dpio_get(), so it
 * presumably relies on the caller holding the DPIO access — confirm
 * before adding new call sites.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/* Clear the low byte again after the calibration step above. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
7458
/*
 * Program the PCH transcoder data/link M/N registers for this crtc's
 * pipe. Only the M1/N1 register set exists here; the TU size is encoded
 * into the same register as gmch_m via TU_SIZE().
 */
static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
7471
7472 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
7473                                  enum transcoder transcoder)
7474 {
7475         if (IS_HASWELL(dev_priv))
7476                 return transcoder == TRANSCODER_EDP;
7477
7478         /*
7479          * Strictly speaking some registers are available before
7480          * gen7, but we only support DRRS on gen7+
7481          */
7482         return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
7483 }
7484
/*
 * Program the CPU transcoder data/link M/N registers. Gen5+ uses the
 * per-transcoder PIPE_* registers and additionally writes the M2/N2 set
 * when DRRS is in use on a transcoder that has those registers; older
 * (g4x-style) hardware uses the per-pipe register variants instead.
 */
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/*
		 * M2_N2 registers are set only if DRRS is supported
		 * (to make sure the registers are not unnecessarily accessed).
		 */
		if (m2_n2 && crtc_state->has_drrs &&
		    transcoder_has_m2_n2(dev_priv, transcoder)) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
7518
/*
 * Program the DP M/N values selected by @m_n (M1_N1 or M2_N2) into the
 * PCH or CPU transcoder registers for this crtc state.
 */
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;

	if (m_n == M1_N1) {
		dp_m_n = &crtc_state->dp_m_n;
		dp_m2_n2 = &crtc_state->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc_state->dp_m2_n2;
	} else {
		DRM_ERROR("Unsupported divider value\n");
		return;
	}

	/*
	 * NOTE(review): the PCH path always programs dp_m_n from the crtc
	 * state, ignoring the dp_m_n local selected above — presumably PCH
	 * transcoders never take the M2_N2 path; confirm with callers.
	 */
	if (crtc_state->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
7543
/*
 * Compute the VLV DPLL control register values. The reference clock and
 * VGA-mode-disable bits are always set up (even for DSI, which does not
 * use the DPLL clock itself); only the VCO/external-buffer enable bits
 * are conditional on a non-DSI output. The pixel multiplier goes into
 * the MD register.
 */
static void vlv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* Pipes other than A additionally need the integrated CRI clock. */
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
			DPLL_EXT_BUFFER_ENABLE_VLV;

	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
7560
/*
 * Compute the CHV DPLL control register values. Mirrors vlv_compute_dpll()
 * but uses the CHV SSC reference clock bit and has no external buffer
 * enable; VCO enable is again skipped for DSI outputs.
 */
static void chv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* Pipes other than A additionally need the integrated CRI clock. */
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;

	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
7576
/*
 * Program the VLV pipe PLL dividers and analog tuning values through
 * DPIO, using the divider values previously computed into
 * pipe_config->dpll. For DSI only the reference clock enable is written
 * and the rest of the sequence is skipped, since the DPLL clock itself
 * is unused there.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll &
		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Dividers first, then calibration enable in a second write. */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/* Core clock tuning; the extra bit is set only for DP outputs. */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(pipe_config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);

	vlv_dpio_put(dev_priv);
}
7676
7677 static void chv_prepare_pll(struct intel_crtc *crtc,
7678                             const struct intel_crtc_state *pipe_config)
7679 {
7680         struct drm_device *dev = crtc->base.dev;
7681         struct drm_i915_private *dev_priv = to_i915(dev);
7682         enum pipe pipe = crtc->pipe;
7683         enum dpio_channel port = vlv_pipe_to_channel(pipe);
7684         u32 loopfilter, tribuf_calcntr;
7685         u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
7686         u32 dpio_val;
7687         int vco;
7688
7689         /* Enable Refclk and SSC */
7690         I915_WRITE(DPLL(pipe),
7691                    pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
7692
7693         /* No need to actually set up the DPLL with DSI */
7694         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7695                 return;
7696
7697         bestn = pipe_config->dpll.n;
7698         bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
7699         bestm1 = pipe_config->dpll.m1;
7700         bestm2 = pipe_config->dpll.m2 >> 22;
7701         bestp1 = pipe_config->dpll.p1;
7702         bestp2 = pipe_config->dpll.p2;
7703         vco = pipe_config->dpll.vco;
7704         dpio_val = 0;
7705         loopfilter = 0;
7706
7707         vlv_dpio_get(dev_priv);
7708
7709         /* p1 and p2 divider */
7710         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
7711                         5 << DPIO_CHV_S1_DIV_SHIFT |
7712                         bestp1 << DPIO_CHV_P1_DIV_SHIFT |
7713                         bestp2 << DPIO_CHV_P2_DIV_SHIFT |
7714                         1 << DPIO_CHV_K_DIV_SHIFT);
7715
7716         /* Feedback post-divider - m2 */
7717         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
7718
7719         /* Feedback refclk divider - n and m1 */
7720         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
7721                         DPIO_CHV_M1_DIV_BY_2 |
7722                         1 << DPIO_CHV_N_DIV_SHIFT);
7723
7724         /* M2 fraction division */
7725         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
7726
7727         /* M2 fraction division enable */
7728         dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7729         dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
7730         dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
7731         if (bestm2_frac)
7732                 dpio_val |= DPIO_CHV_FRAC_DIV_EN;
7733         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
7734
7735         /* Program digital lock detect threshold */
7736         dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
7737         dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
7738                                         DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
7739         dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
7740         if (!bestm2_frac)
7741                 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
7742         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
7743
7744         /* Loop filter */
7745         if (vco == 5400000) {
7746                 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
7747                 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
7748                 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
7749                 tribuf_calcntr = 0x9;
7750         } else if (vco <= 6200000) {
7751                 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
7752                 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
7753                 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7754                 tribuf_calcntr = 0x9;
7755         } else if (vco <= 6480000) {
7756                 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7757                 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7758                 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7759                 tribuf_calcntr = 0x8;
7760         } else {
7761                 /* Not supported. Apply the same limits as in the max case */
7762                 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7763                 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7764                 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7765                 tribuf_calcntr = 0;
7766         }
7767         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
7768
7769         dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
7770         dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
7771         dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
7772         vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
7773
7774         /* AFC Recal */
7775         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
7776                         vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
7777                         DPIO_AFC_RECAL);
7778
7779         vlv_dpio_put(dev_priv);
7780 }
7781
7782 /**
7783  * vlv_force_pll_on - forcibly enable just the PLL
7784  * @dev_priv: i915 private structure
7785  * @pipe: pipe PLL to enable
7786  * @dpll: PLL configuration
7787  *
7788  * Enable the PLL for @pipe using the supplied @dpll config. To be used
7789  * in cases where we need the PLL enabled even when @pipe is not going to
7790  * be enabled.
7791  */
7792 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
7793                      const struct dpll *dpll)
7794 {
7795         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
7796         struct intel_crtc_state *pipe_config;
7797
7798         pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7799         if (!pipe_config)
7800                 return -ENOMEM;
7801
7802         pipe_config->base.crtc = &crtc->base;
7803         pipe_config->pixel_multiplier = 1;
7804         pipe_config->dpll = *dpll;
7805
7806         if (IS_CHERRYVIEW(dev_priv)) {
7807                 chv_compute_dpll(crtc, pipe_config);
7808                 chv_prepare_pll(crtc, pipe_config);
7809                 chv_enable_pll(crtc, pipe_config);
7810         } else {
7811                 vlv_compute_dpll(crtc, pipe_config);
7812                 vlv_prepare_pll(crtc, pipe_config);
7813                 vlv_enable_pll(crtc, pipe_config);
7814         }
7815
7816         kfree(pipe_config);
7817
7818         return 0;
7819 }
7820
7821 /**
7822  * vlv_force_pll_off - forcibly disable just the PLL
7823  * @dev_priv: i915 private structure
7824  * @pipe: pipe PLL to disable
7825  *
7826  * Disable the PLL for @pipe. To be used in cases where we need
7827  * the PLL enabled even when @pipe is not going to be enabled.
7828  */
7829 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
7830 {
7831         if (IS_CHERRYVIEW(dev_priv))
7832                 chv_disable_pll(dev_priv, pipe);
7833         else
7834                 vlv_disable_pll(dev_priv, pipe);
7835 }
7836
/*
 * Compute the DPLL control word for gen3/4-style PLLs and record it
 * (plus DPLL_MD on gen4+) in crtc_state->dpll_hw_state.
 * crtc_state->dpll must already hold valid divider values.
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	/* record the FP divider state first (helper defined elsewhere) */
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* these platforms carry the pixel multiplier in the DPLL itself */
	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value (one-hot encoding) */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		/* g4x carries a second p1 for the reduced (downclocked) mode */
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* select the reference clock input */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	/* gen4+ programs the pixel multiplier via DPLL_MD instead */
	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
7909
/*
 * Compute the DPLL control word for gen2 PLLs and record it in
 * crtc_state->dpll_hw_state.dpll.  crtc_state->dpll must already
 * hold valid divider values.
 */
static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	/* record the FP divider state first (helper defined elsewhere) */
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	/* LVDS uses a one-hot p1 encoding; other outputs store p1 - 2 */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	/*
	 * Bspec:
	 * "[Almador Errata]: For the correct operation of the muxed DVO pins
	 *  (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
	 *  GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
	 *  Enable) must be set to “1” in both the DPLL A Control Register
	 *  (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
	 *
	 * For simplicity We simply keep both bits always enabled in
	 * both DPLLS. The spec says we should disable the DVO 2X clock
	 * when not needed, but this seems to work fine in practice.
	 */
	if (IS_I830(dev_priv) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	/* select the reference clock input */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}
7959
/*
 * Program the pipe/transcoder timing registers (H/V TOTAL, BLANK, SYNC
 * and VSYNCSHIFT) from the adjusted mode.  Hardware stores every timing
 * field as value - 1, hence the decrements below.
 */
static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
	u32 crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		/* keep the shift within one scanline */
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	/* each register packs the start value in 15:0, the end in 31:16 */
	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
8021
8022 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
8023 {
8024         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8025         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8026         enum pipe pipe = crtc->pipe;
8027
8028         /* pipesrc controls the size that is scaled from, which should
8029          * always be the user's requested size.
8030          */
8031         I915_WRITE(PIPESRC(pipe),
8032                    ((crtc_state->pipe_src_w - 1) << 16) |
8033                    (crtc_state->pipe_src_h - 1));
8034 }
8035
/*
 * Read the pipe/transcoder timing registers back from hardware into the
 * adjusted mode.  Hardware stores each field as value - 1, hence the
 * + 1 on every read; blanking registers are skipped for DSI transcoders.
 */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	u32 tmp;

	tmp = I915_READ(HTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = I915_READ(HBLANK(cpu_transcoder));
		pipe_config->base.adjusted_mode.crtc_hblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->base.adjusted_mode.crtc_hblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = I915_READ(HSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = I915_READ(VTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = I915_READ(VBLANK(cpu_transcoder));
		pipe_config->base.adjusted_mode.crtc_vblank_start =
							(tmp & 0xffff) + 1;
		pipe_config->base.adjusted_mode.crtc_vblank_end =
						((tmp >> 16) & 0xffff) + 1;
	}
	tmp = I915_READ(VSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		/* undo the halfline adjustment done when programming */
		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
	}
}
8080
8081 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8082                                     struct intel_crtc_state *pipe_config)
8083 {
8084         struct drm_device *dev = crtc->base.dev;
8085         struct drm_i915_private *dev_priv = to_i915(dev);
8086         u32 tmp;
8087
8088         tmp = I915_READ(PIPESRC(crtc->pipe));
8089         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8090         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8091
8092         pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
8093         pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
8094 }
8095
8096 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
8097                                  struct intel_crtc_state *pipe_config)
8098 {
8099         mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
8100         mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
8101         mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
8102         mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
8103
8104         mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
8105         mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
8106         mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
8107         mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
8108
8109         mode->flags = pipe_config->base.adjusted_mode.flags;
8110         mode->type = DRM_MODE_TYPE_DRIVER;
8111
8112         mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
8113
8114         mode->hsync = drm_mode_hsync(mode);
8115         mode->vrefresh = drm_mode_vrefresh(mode);
8116         drm_mode_set_name(mode);
8117 }
8118
/*
 * Compute and write the PIPECONF register for gmch platforms
 * (gen2-4, VLV, CHV) from the crtc state.
 */
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;

	if (crtc_state->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* pre-gen4 or SDVO: use the field-indication interlace mode */
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
	}

	/* limited (16-235) color range selection, VLV/CHV only here */
	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     crtc_state->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);

	I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(crtc->pipe));
}
8177
8178 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8179                                    struct intel_crtc_state *crtc_state)
8180 {
8181         struct drm_device *dev = crtc->base.dev;
8182         struct drm_i915_private *dev_priv = to_i915(dev);
8183         const struct intel_limit *limit;
8184         int refclk = 48000;
8185
8186         memset(&crtc_state->dpll_hw_state, 0,
8187                sizeof(crtc_state->dpll_hw_state));
8188
8189         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8190                 if (intel_panel_use_ssc(dev_priv)) {
8191                         refclk = dev_priv->vbt.lvds_ssc_freq;
8192                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8193                 }
8194
8195                 limit = &intel_limits_i8xx_lvds;
8196         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
8197                 limit = &intel_limits_i8xx_dvo;
8198         } else {
8199                 limit = &intel_limits_i8xx_dac;
8200         }
8201
8202         if (!crtc_state->clock_set &&
8203             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8204                                  refclk, NULL, &crtc_state->dpll)) {
8205                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8206                 return -EINVAL;
8207         }
8208
8209         i8xx_compute_dpll(crtc, crtc_state, NULL);
8210
8211         return 0;
8212 }
8213
8214 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
8215                                   struct intel_crtc_state *crtc_state)
8216 {
8217         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8218         const struct intel_limit *limit;
8219         int refclk = 96000;
8220
8221         memset(&crtc_state->dpll_hw_state, 0,
8222                sizeof(crtc_state->dpll_hw_state));
8223
8224         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8225                 if (intel_panel_use_ssc(dev_priv)) {
8226                         refclk = dev_priv->vbt.lvds_ssc_freq;
8227                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8228                 }
8229
8230                 if (intel_is_dual_link_lvds(dev_priv))
8231                         limit = &intel_limits_g4x_dual_channel_lvds;
8232                 else
8233                         limit = &intel_limits_g4x_single_channel_lvds;
8234         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
8235                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
8236                 limit = &intel_limits_g4x_hdmi;
8237         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
8238                 limit = &intel_limits_g4x_sdvo;
8239         } else {
8240                 /* The option is for other outputs */
8241                 limit = &intel_limits_i9xx_sdvo;
8242         }
8243
8244         if (!crtc_state->clock_set &&
8245             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8246                                 refclk, NULL, &crtc_state->dpll)) {
8247                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8248                 return -EINVAL;
8249         }
8250
8251         i9xx_compute_dpll(crtc, crtc_state, NULL);
8252
8253         return 0;
8254 }
8255
8256 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
8257                                   struct intel_crtc_state *crtc_state)
8258 {
8259         struct drm_device *dev = crtc->base.dev;
8260         struct drm_i915_private *dev_priv = to_i915(dev);
8261         const struct intel_limit *limit;
8262         int refclk = 96000;
8263
8264         memset(&crtc_state->dpll_hw_state, 0,
8265                sizeof(crtc_state->dpll_hw_state));
8266
8267         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8268                 if (intel_panel_use_ssc(dev_priv)) {
8269                         refclk = dev_priv->vbt.lvds_ssc_freq;
8270                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8271                 }
8272
8273                 limit = &intel_limits_pineview_lvds;
8274         } else {
8275                 limit = &intel_limits_pineview_sdvo;
8276         }
8277
8278         if (!crtc_state->clock_set &&
8279             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8280                                 refclk, NULL, &crtc_state->dpll)) {
8281                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8282                 return -EINVAL;
8283         }
8284
8285         i9xx_compute_dpll(crtc, crtc_state, NULL);
8286
8287         return 0;
8288 }
8289
8290 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
8291                                    struct intel_crtc_state *crtc_state)
8292 {
8293         struct drm_device *dev = crtc->base.dev;
8294         struct drm_i915_private *dev_priv = to_i915(dev);
8295         const struct intel_limit *limit;
8296         int refclk = 96000;
8297
8298         memset(&crtc_state->dpll_hw_state, 0,
8299                sizeof(crtc_state->dpll_hw_state));
8300
8301         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8302                 if (intel_panel_use_ssc(dev_priv)) {
8303                         refclk = dev_priv->vbt.lvds_ssc_freq;
8304                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8305                 }
8306
8307                 limit = &intel_limits_i9xx_lvds;
8308         } else {
8309                 limit = &intel_limits_i9xx_sdvo;
8310         }
8311
8312         if (!crtc_state->clock_set &&
8313             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8314                                  refclk, NULL, &crtc_state->dpll)) {
8315                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8316                 return -EINVAL;
8317         }
8318
8319         i9xx_compute_dpll(crtc, crtc_state, NULL);
8320
8321         return 0;
8322 }
8323
8324 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8325                                   struct intel_crtc_state *crtc_state)
8326 {
8327         int refclk = 100000;
8328         const struct intel_limit *limit = &intel_limits_chv;
8329
8330         memset(&crtc_state->dpll_hw_state, 0,
8331                sizeof(crtc_state->dpll_hw_state));
8332
8333         if (!crtc_state->clock_set &&
8334             !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8335                                 refclk, NULL, &crtc_state->dpll)) {
8336                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8337                 return -EINVAL;
8338         }
8339
8340         chv_compute_dpll(crtc, crtc_state);
8341
8342         return 0;
8343 }
8344
8345 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8346                                   struct intel_crtc_state *crtc_state)
8347 {
8348         int refclk = 100000;
8349         const struct intel_limit *limit = &intel_limits_vlv;
8350
8351         memset(&crtc_state->dpll_hw_state, 0,
8352                sizeof(crtc_state->dpll_hw_state));
8353
8354         if (!crtc_state->clock_set &&
8355             !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8356                                 refclk, NULL, &crtc_state->dpll)) {
8357                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8358                 return -EINVAL;
8359         }
8360
8361         vlv_compute_dpll(crtc, crtc_state);
8362
8363         return 0;
8364 }
8365
8366 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
8367 {
8368         if (IS_I830(dev_priv))
8369                 return false;
8370
8371         return INTEL_GEN(dev_priv) >= 4 ||
8372                 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
8373 }
8374
8375 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
8376                                  struct intel_crtc_state *pipe_config)
8377 {
8378         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8379         u32 tmp;
8380
8381         if (!i9xx_has_pfit(dev_priv))
8382                 return;
8383
8384         tmp = I915_READ(PFIT_CONTROL);
8385         if (!(tmp & PFIT_ENABLE))
8386                 return;
8387
8388         /* Check whether the pfit is attached to our pipe. */
8389         if (INTEL_GEN(dev_priv) < 4) {
8390                 if (crtc->pipe != PIPE_B)
8391                         return;
8392         } else {
8393                 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8394                         return;
8395         }
8396
8397         pipe_config->gmch_pfit.control = tmp;
8398         pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
8399 }
8400
/*
 * Read the VLV PLL dividers back via the DPIO sideband and derive
 * port_clock.  Requires dpll_hw_state.dpll to have been read out
 * already (used to detect the DSI/bypass case).
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	/* NOTE(review): cpu_transcoder is used as the pipe/DPIO index here —
	 * presumably transcoder == pipe on VLV; confirm against readout. */
	int pipe = pipe_config->cpu_transcoder;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	vlv_dpio_put(dev_priv);

	/* unpack the divider fields from PLL DW3 */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
8427
/*
 * Read out the current hardware state of the primary plane and fill
 * @plane_config with a framebuffer describing it.  Nothing is filled
 * in if the plane is disabled or the fb struct can't be allocated.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	enum pipe pipe;
	u32 val, base, offset;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Plane off -> nothing to inherit. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(DSPCNTR(i9xx_plane));

	/* Tiling and rotation bits only exist on gen4+. */
	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}

		if (val & DISPPLANE_ROTATE_180)
			plane_config->rotation = DRM_MODE_ROTATE_180;
	}

	/* CHV pipe B primary plane additionally supports horizontal mirroring. */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
	    val & DISPPLANE_MIRROR)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	/* Surface base/offset register layout differs per generation. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = I915_READ(DSPOFFSET(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(i9xx_plane));
		else
			offset = I915_READ(DSPLINOFF(i9xx_plane));
		base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(i9xx_plane));
	}
	plane_config->base = base;

	/* PIPESRC holds (size - 1) in both dimensions, hence the +1. */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(i9xx_plane));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	/* Estimated allocation size of the firmware framebuffer. */
	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
8510
/*
 * Read back the CHV DPLL divider values via DPIO and compute the
 * resulting port clock for hardware state readout.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder; /* assumes transcoder == pipe on CHV */
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000; /* reference clock in kHz */

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	vlv_dpio_get(dev_priv);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	vlv_dpio_put(dev_priv);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/*
	 * M2 is a fixed-point value: integer part from PLL_DW0 shifted up
	 * by 22 bits, fractional part from PLL_DW2 when the fractional
	 * divider is enabled.
	 */
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
8544
/*
 * Read out the pipe output colorspace (RGB vs. YCbCr 4:4:4 / 4:2:0)
 * from PIPEMISC on BDW+, and record whether 4:4:4 output is being used
 * for LSPCON downsampling.  On older platforms the output is RGB.
 */
static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;

	pipe_config->lspcon_downsampling = false;

	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
		u32 tmp = I915_READ(PIPEMISC(crtc->pipe));

		if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
			bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
			bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;

			if (ycbcr420_enabled) {
				/* We support 4:2:0 in full blend mode only */
				if (!blend)
					output = INTEL_OUTPUT_FORMAT_INVALID;
				else if (!(IS_GEMINILAKE(dev_priv) ||
					   INTEL_GEN(dev_priv) >= 10))
					/* 4:2:0 only exists on GLK/gen10+ */
					output = INTEL_OUTPUT_FORMAT_INVALID;
				else
					output = INTEL_OUTPUT_FORMAT_YCBCR420;
			} else {
				/*
				 * Currently there is no interface defined to
				 * check user preference between RGB/YCBCR444
				 * or YCBCR420. So the only possible case for
				 * YCBCR444 usage is driving YCBCR420 output
				 * with LSPCON, when pipe is configured for
				 * YCBCR444 output and LSPCON takes care of
				 * downsampling it.
				 */
				pipe_config->lspcon_downsampling = true;
				output = INTEL_OUTPUT_FORMAT_YCBCR444;
			}
		}
	}

	pipe_config->output_format = output;
}
8587
8588 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
8589 {
8590         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8591         struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8592         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8593         enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8594         u32 tmp;
8595
8596         tmp = I915_READ(DSPCNTR(i9xx_plane));
8597
8598         if (tmp & DISPPLANE_GAMMA_ENABLE)
8599                 crtc_state->gamma_enable = true;
8600
8601         if (!HAS_GMCH(dev_priv) &&
8602             tmp & DISPPLANE_PIPE_CSC_ENABLE)
8603                 crtc_state->csc_enable = true;
8604 }
8605
/*
 * Read the full pipe hardware state on GMCH-era platforms into
 * @pipe_config.  Returns true if the pipe is enabled and the state was
 * read out, false if the pipe (or its power well) is off.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	u32 tmp;
	bool ret;

	/*
	 * Pipe registers are only accessible while the pipe's power
	 * domain is enabled; if we can't get a reference the pipe is off.
	 */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* Only these platforms encode the pipe bpp in PIPECONF. */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
		PIPECONF_GAMMA_MODE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv))
		pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));

	i9xx_get_pipe_color_config(pipe_config);
	intel_color_get_config(pipe_config);

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* Pixel multiplier readout depends on the platform generation. */
	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
8723
/*
 * Configure the IBX/CPT PCH display reference clock (PCH_DREF_CONTROL):
 * pick the non-spread source (CK505 vs. internal), and enable/disable
 * the SSC source and CPU (eDP) output as required by the connected
 * panels.  Sources are transitioned one at a time with the mandated
 * delays; the precomputed 'final' value lets us skip the whole dance
 * when nothing needs to change.
 */
static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	/*
	 * On IBX the VBT tells us whether an external CK505 clock chip is
	 * present, and SSC is only usable together with it.  Later PCHs
	 * have no CK505 and can always use SSC.
	 */
	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		u32 temp = I915_READ(PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		      has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* No panel, but keep SSC alive for the PLL(s) relying on it. */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	/* Nothing to change -> done. */
	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output  */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			DRM_DEBUG_KMS("Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			I915_WRITE(PCH_DREF_CONTROL, val);
			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* The step-by-step transitions above must converge on 'final'. */
	BUG_ON(val != final);
}
8890
8891 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
8892 {
8893         u32 tmp;
8894
8895         tmp = I915_READ(SOUTH_CHICKEN2);
8896         tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8897         I915_WRITE(SOUTH_CHICKEN2, tmp);
8898
8899         if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
8900                         FDI_MPHY_IOSFSB_RESET_STATUS, 100))
8901                 DRM_ERROR("FDI mPHY reset assert timeout\n");
8902
8903         tmp = I915_READ(SOUTH_CHICKEN2);
8904         tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8905         I915_WRITE(SOUTH_CHICKEN2, tmp);
8906
8907         if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
8908                          FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
8909                 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
8910 }
8911
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY over the sideband interface.  The raw register
 * offsets and magic values come straight from the WaMPhyProgramming
 * workaround in BSpec; most registers appear in 0x20xx/0x21xx pairs,
 * presumably one per mPHY channel — verify against BSpec.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
8986
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* Sanitize impossible parameter combinations. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	/* Un-disable the SSC block while keeping PATHALT set. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Clear PATHALT to activate the spread path. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		/* FDI additionally needs the mPHY reset + programming. */
		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* Buffer-enable config bit lives in a different SBI reg on LP PCH. */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
9031
/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Clear the buffer-enable config bit (reg differs on LP PCH). */
	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* Set PATHALT before disabling the SSC block. */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}
9057
/* Map a bend amount in steps of 5 (-50..+50) to a table index (0..20). */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SBI SSCDIVINTPHASE values for each supported clock bend amount,
 * indexed via BEND_IDX(); consumed by lpt_bend_clkout_dp().
 */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};
9083
9084 /*
9085  * Bend CLKOUT_DP
9086  * steps -50 to 50 inclusive, in steps of 5
9087  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
9088  * change in clock period = -(steps / 10) * 5.787 ps
9089  */
9090 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
9091 {
9092         u32 tmp;
9093         int idx = BEND_IDX(steps);
9094
9095         if (WARN_ON(steps % 5 != 0))
9096                 return;
9097
9098         if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
9099                 return;
9100
9101         mutex_lock(&dev_priv->sb_lock);
9102
9103         if (steps % 10 != 0)
9104                 tmp = 0xAAAAAAAB;
9105         else
9106                 tmp = 0x00000000;
9107         intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
9108
9109         tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
9110         tmp &= 0xffff0000;
9111         tmp |= sscdivintphase[idx];
9112         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
9113
9114         mutex_unlock(&dev_priv->sb_lock);
9115 }
9116
9117 #undef BEND_IDX
9118
9119 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
9120 {
9121         u32 fuse_strap = I915_READ(FUSE_STRAP);
9122         u32 ctl = I915_READ(SPLL_CTL);
9123
9124         if ((ctl & SPLL_PLL_ENABLE) == 0)
9125                 return false;
9126
9127         if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
9128             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9129                 return true;
9130
9131         if (IS_BROADWELL(dev_priv) &&
9132             (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
9133                 return true;
9134
9135         return false;
9136 }
9137
9138 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
9139                                enum intel_dpll_id id)
9140 {
9141         u32 fuse_strap = I915_READ(FUSE_STRAP);
9142         u32 ctl = I915_READ(WRPLL_CTL(id));
9143
9144         if ((ctl & WRPLL_PLL_ENABLE) == 0)
9145                 return false;
9146
9147         if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
9148                 return true;
9149
9150         if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
9151             (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
9152             (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
9153                 return true;
9154
9155         return false;
9156 }
9157
/*
 * Initialize the LPT PCH reference clock (CLKOUT_DP): enable it with
 * spread + FDI configuration when an analog (FDI) encoder is present,
 * otherwise disable it — unless any active PLL is still referencing
 * the PCH SSC, in which case leave everything alone.
 */
static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool pch_ssc_in_use = false;
	bool has_fdi = false;

	/* Only analog (CRT) output goes over FDI on LPT. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_fdi = true;
			break;
		default:
			break;
		}
	}

	/*
	 * The BIOS may have decided to use the PCH SSC
	 * reference so we must not disable it until the
	 * relevant PLLs have stopped relying on it. We'll
	 * just leave the PCH SSC reference enabled in case
	 * any active PLL is using it. It will get disabled
	 * after runtime suspend if we don't have FDI.
	 *
	 * TODO: Move the whole reference clock handling
	 * to the modeset sequence proper so that we can
	 * actually enable/disable/reconfigure these things
	 * safely. To do that we need to introduce a real
	 * clock hierarchy. That would also allow us to do
	 * clock bending finally.
	 */
	if (spll_uses_pch_ssc(dev_priv)) {
		DRM_DEBUG_KMS("SPLL using PCH SSC\n");
		pch_ssc_in_use = true;
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
		DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
		pch_ssc_in_use = true;
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
		DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
		pch_ssc_in_use = true;
	}

	if (pch_ssc_in_use)
		return;

	if (has_fdi) {
		/* FDI wants CLKOUT_DP with spread and no bend. */
		lpt_bend_clkout_dp(dev_priv, 0);
		lpt_enable_clkout_dp(dev_priv, true, true);
	} else {
		lpt_disable_clkout_dp(dev_priv);
	}
}
9214
/*
 * One-time PCH reference clock setup at driver load, dispatched on the
 * PCH type.
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		ironlake_init_pch_refclk(dev_priv);
		return;
	}

	if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev_priv);
}
9225
9226 static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
9227 {
9228         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9229         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9230         enum pipe pipe = crtc->pipe;
9231         u32 val;
9232
9233         val = 0;
9234
9235         switch (crtc_state->pipe_bpp) {
9236         case 18:
9237                 val |= PIPECONF_6BPC;
9238                 break;
9239         case 24:
9240                 val |= PIPECONF_8BPC;
9241                 break;
9242         case 30:
9243                 val |= PIPECONF_10BPC;
9244                 break;
9245         case 36:
9246                 val |= PIPECONF_12BPC;
9247                 break;
9248         default:
9249                 /* Case prevented by intel_choose_pipe_bpp_dither. */
9250                 BUG();
9251         }
9252
9253         if (crtc_state->dither)
9254                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9255
9256         if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9257                 val |= PIPECONF_INTERLACED_ILK;
9258         else
9259                 val |= PIPECONF_PROGRESSIVE;
9260
9261         if (crtc_state->limited_color_range)
9262                 val |= PIPECONF_COLOR_RANGE_SELECT;
9263
9264         val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
9265
9266         I915_WRITE(PIPECONF(pipe), val);
9267         POSTING_READ(PIPECONF(pipe));
9268 }
9269
9270 static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
9271 {
9272         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9273         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9274         enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
9275         u32 val = 0;
9276
9277         if (IS_HASWELL(dev_priv) && crtc_state->dither)
9278                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9279
9280         if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
9281                 val |= PIPECONF_INTERLACED_ILK;
9282         else
9283                 val |= PIPECONF_PROGRESSIVE;
9284
9285         I915_WRITE(PIPECONF(cpu_transcoder), val);
9286         POSTING_READ(PIPECONF(cpu_transcoder));
9287 }
9288
/*
 * Program PIPEMISC for BDW+: dither bpc/type, output colorspace and,
 * on gen11+, the HDR pipe precision mode.
 */
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val = 0;

	switch (crtc_state->pipe_bpp) {
	case 18:
		val |= PIPEMISC_DITHER_6_BPC;
		break;
	case 24:
		val |= PIPEMISC_DITHER_8_BPC;
		break;
	case 30:
		val |= PIPEMISC_DITHER_10_BPC;
		break;
	case 36:
		val |= PIPEMISC_DITHER_12_BPC;
		break;
	default:
		MISSING_CASE(crtc_state->pipe_bpp);
		break;
	}

	if (crtc_state->dither)
		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

	/* Both YCbCr output formats use the YUV colorspace bit... */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;

	/* ...but 4:2:0 additionally enables the pipe's own blending mode. */
	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
		val |= PIPEMISC_YUV420_ENABLE |
			PIPEMISC_YUV420_MODE_FULL_BLEND;

	/*
	 * HDR precision mode only when every active plane apart from the
	 * cursor is in the HDR-capable plane mask.
	 */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
					   BIT(PLANE_CURSOR))) == 0)
		val |= PIPEMISC_HDR_MODE_PRECISION;

	I915_WRITE(PIPEMISC(crtc->pipe), val);
}
9331
9332 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
9333 {
9334         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9335         u32 tmp;
9336
9337         tmp = I915_READ(PIPEMISC(crtc->pipe));
9338
9339         switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
9340         case PIPEMISC_DITHER_6_BPC:
9341                 return 18;
9342         case PIPEMISC_DITHER_8_BPC:
9343                 return 24;
9344         case PIPEMISC_DITHER_10_BPC:
9345                 return 30;
9346         case PIPEMISC_DITHER_12_BPC:
9347                 return 36;
9348         default:
9349                 MISSING_CASE(tmp);
9350                 return 0;
9351         }
9352 }
9353
9354 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
9355 {
9356         /*
9357          * Account for spread spectrum to avoid
9358          * oversubscribing the link. Max center spread
9359          * is 2.5%; use 5% for safety's sake.
9360          */
9361         u32 bps = target_clock * bpp * 21 / 20;
9362         return DIV_ROUND_UP(bps, link_bw * 8);
9363 }
9364
9365 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
9366 {
9367         return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
9368 }
9369
/*
 * Compute the ILK-style DPLL and FP register values for @crtc_state and
 * store them in crtc_state->dpll_hw_state.  @reduced_clock, when
 * non-NULL, supplies the divisors programmed into FP1 for the reduced
 * clock; otherwise FP1 mirrors FP0.
 */
static void ironlake_compute_dpll(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) &&
		     intel_is_dual_link_lvds(dev_priv)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock) {
		factor = 20;
	}

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		/* Same CB tune criterion, applied to the reduced clock. */
		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* Pixel multiplier is stored in the register as value - 1. */
	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* SSC reference only for LVDS panels that actually use SSC. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
9471
/*
 * Find DPLL divisors and reserve a shared PCH PLL for @crtc.
 *
 * Returns 0 on success, -EINVAL when no suitable divisors or no free
 * shared PLL can be found.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_limit *limit;
	int refclk = 120000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
				      dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		/* Divisor limits differ for dual-link and 100MHz refclk. */
		if (intel_is_dual_link_lvds(dev_priv)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else {
		limit = &intel_limits_ironlake_dac;
	}

	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ironlake_compute_dpll(crtc, crtc_state, NULL);

	if (!intel_get_shared_dpll(crtc_state, NULL)) {
		DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
			      pipe_name(crtc->pipe));
		return -EINVAL;
	}

	return 0;
}
9525
9526 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
9527                                          struct intel_link_m_n *m_n)
9528 {
9529         struct drm_device *dev = crtc->base.dev;
9530         struct drm_i915_private *dev_priv = to_i915(dev);
9531         enum pipe pipe = crtc->pipe;
9532
9533         m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
9534         m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
9535         m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
9536                 & ~TU_SIZE_MASK;
9537         m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
9538         m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
9539                     & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9540 }
9541
/*
 * Read back link/data M/N values for a CPU transcoder.  Gen5+ indexes
 * the registers by transcoder, older platforms by pipe.  @m2_n2 is
 * additionally filled in when non-NULL and the transcoder has a second
 * M/N register set.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		/* TU size shares the DATA_M register, stored as value - 1. */
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;

		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
9578
9579 void intel_dp_get_m_n(struct intel_crtc *crtc,
9580                       struct intel_crtc_state *pipe_config)
9581 {
9582         if (pipe_config->has_pch_encoder)
9583                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9584         else
9585                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9586                                              &pipe_config->dp_m_n,
9587                                              &pipe_config->dp_m2_n2);
9588 }
9589
/* Read out the FDI M/N values (no M2/N2 set exists for FDI). */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
9596
9597 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9598                                     struct intel_crtc_state *pipe_config)
9599 {
9600         struct drm_device *dev = crtc->base.dev;
9601         struct drm_i915_private *dev_priv = to_i915(dev);
9602         struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9603         u32 ps_ctrl = 0;
9604         int id = -1;
9605         int i;
9606
9607         /* find scaler attached to this pipe */
9608         for (i = 0; i < crtc->num_scalers; i++) {
9609                 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9610                 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9611                         id = i;
9612                         pipe_config->pch_pfit.enabled = true;
9613                         pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9614                         pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9615                         scaler_state->scalers[i].in_use = true;
9616                         break;
9617                 }
9618         }
9619
9620         scaler_state->scaler_id = id;
9621         if (id >= 0) {
9622                 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9623         } else {
9624                 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9625         }
9626 }
9627
/*
 * Read out the current hardware state of the primary plane and
 * reconstruct a framebuffer descriptor for it in @plane_config.
 * On failure to interpret the state, the temporary fb is freed and
 * plane_config->fb is left unset.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to take over if the plane isn't enabled. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(PLANE_CTL(pipe, plane_id));

	/* Gen11 uses a different pixel format field mask. */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* Alpha mode lives in PLANE_COLOR_CTL on GLK and gen10+. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		plane_config->tiling = I915_TILING_Y;
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/*
	 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, that's why this swapping.
	 */
	switch (val & PLANE_CTL_ROTATE_MASK) {
	case PLANE_CTL_ROTATE_0:
		plane_config->rotation = DRM_MODE_ROTATE_0;
		break;
	case PLANE_CTL_ROTATE_90:
		plane_config->rotation = DRM_MODE_ROTATE_270;
		break;
	case PLANE_CTL_ROTATE_180:
		plane_config->rotation = DRM_MODE_ROTATE_180;
		break;
	case PLANE_CTL_ROTATE_270:
		plane_config->rotation = DRM_MODE_ROTATE_90;
		break;
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    val & PLANE_CTL_FLIP_HORIZONTAL)
		plane_config->rotation |= DRM_MODE_REFLECT_X;

	/* Low 12 bits of the surface address register are masked off. */
	base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, plane_id));

	/* PLANE_SIZE stores width/height as value - 1. */
	val = I915_READ(PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
	stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
9754
/* Read out the ILK-style panel fitter state into @pipe_config. */
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/*
		 * We currently do not free assignments of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes
		 * which differentiate them) so just WARN about this case
		 * for now.
		 */
		if (IS_GEN(dev_priv, 7)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}
9778
9779 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9780                                      struct intel_crtc_state *pipe_config)
9781 {
9782         struct drm_device *dev = crtc->base.dev;
9783         struct drm_i915_private *dev_priv = to_i915(dev);
9784         enum intel_display_power_domain power_domain;
9785         intel_wakeref_t wakeref;
9786         u32 tmp;
9787         bool ret;
9788
9789         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9790         wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
9791         if (!wakeref)
9792                 return false;
9793
9794         pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
9795         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9796         pipe_config->shared_dpll = NULL;
9797
9798         ret = false;
9799         tmp = I915_READ(PIPECONF(crtc->pipe));
9800         if (!(tmp & PIPECONF_ENABLE))
9801                 goto out;
9802
9803         switch (tmp & PIPECONF_BPC_MASK) {
9804         case PIPECONF_6BPC:
9805                 pipe_config->pipe_bpp = 18;
9806                 break;
9807         case PIPECONF_8BPC:
9808                 pipe_config->pipe_bpp = 24;
9809                 break;
9810         case PIPECONF_10BPC:
9811                 pipe_config->pipe_bpp = 30;
9812                 break;
9813         case PIPECONF_12BPC:
9814                 pipe_config->pipe_bpp = 36;
9815                 break;
9816         default:
9817                 break;
9818         }
9819
9820         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9821                 pipe_config->limited_color_range = true;
9822
9823         pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
9824                 PIPECONF_GAMMA_MODE_SHIFT;
9825
9826         pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
9827
9828         i9xx_get_pipe_color_config(pipe_config);
9829         intel_color_get_config(pipe_config);
9830
9831         if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
9832                 struct intel_shared_dpll *pll;
9833                 enum intel_dpll_id pll_id;
9834
9835                 pipe_config->has_pch_encoder = true;
9836
9837                 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9838                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9839                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
9840
9841                 ironlake_get_fdi_m_n_config(crtc, pipe_config);
9842
9843                 if (HAS_PCH_IBX(dev_priv)) {
9844                         /*
9845                          * The pipe->pch transcoder and pch transcoder->pll
9846                          * mapping is fixed.
9847                          */
9848                         pll_id = (enum intel_dpll_id) crtc->pipe;
9849                 } else {
9850                         tmp = I915_READ(PCH_DPLL_SEL);
9851                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
9852                                 pll_id = DPLL_ID_PCH_PLL_B;
9853                         else
9854                                 pll_id= DPLL_ID_PCH_PLL_A;
9855                 }
9856
9857                 pipe_config->shared_dpll =
9858                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
9859                 pll = pipe_config->shared_dpll;
9860
9861                 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
9862                                                 &pipe_config->dpll_hw_state));
9863
9864                 tmp = pipe_config->dpll_hw_state.dpll;
9865                 pipe_config->pixel_multiplier =
9866                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9867                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
9868
9869                 ironlake_pch_clock_get(crtc, pipe_config);
9870         } else {
9871                 pipe_config->pixel_multiplier = 1;
9872         }
9873
9874         intel_get_pipe_timings(crtc, pipe_config);
9875         intel_get_pipe_src_size(crtc, pipe_config);
9876
9877         ironlake_get_pfit_config(crtc, pipe_config);
9878
9879         ret = true;
9880
9881 out:
9882         intel_display_power_put(dev_priv, power_domain, wakeref);
9883
9884         return ret;
9885 }
/*
 * HSW+ clock computation: reserve a shared DPLL via the new encoder.
 * Returns 0 on success, -EINVAL when no PLL could be reserved.
 */
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
				      struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->base.state);

	/* Only pre-gen11 DSI skips the shared DPLL path. */
	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
	    INTEL_GEN(dev_priv) >= 11) {
		struct intel_encoder *encoder =
			intel_get_crtc_new_encoder(state, crtc_state);

		if (!intel_get_shared_dpll(crtc_state, encoder)) {
			DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
				      pipe_name(crtc->pipe));
			return -EINVAL;
		}
	}

	return 0;
}
9907
/*
 * Read out which shared DPLL clocks DDI @port on CNL from the
 * DPCLKA_CFGCR0 clock select field.
 */
static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
				   enum port port,
				   struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
	id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);

	/* Only DPLL0..2 are valid selections here. */
	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9923
/*
 * Read out which PLL feeds DDI @port on ICL.  Combo PHY ports use the
 * DPCLKA_CFGCR0 clock select field; Type-C ports use DDI_CLK_SEL to
 * choose between the port's MG PLL and the shared TBT PLL.
 */
static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	if (intel_port_is_combophy(dev_priv, port)) {
		temp = I915_READ(DPCLKA_CFGCR0_ICL) &
		       DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
		id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
	} else if (intel_port_is_tc(dev_priv, port)) {
		u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;

		if (clk_sel == DDI_CLK_SEL_MG) {
			id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
								    port));
		} else {
			/* Anything else must be one of the TBT rates. */
			WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
			id = DPLL_ID_ICL_TBTPLL;
		}
	} else {
		WARN(1, "Invalid port %x\n", port);
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9952
9953 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9954                                 enum port port,
9955                                 struct intel_crtc_state *pipe_config)
9956 {
9957         enum intel_dpll_id id;
9958
9959         switch (port) {
9960         case PORT_A:
9961                 id = DPLL_ID_SKL_DPLL0;
9962                 break;
9963         case PORT_B:
9964                 id = DPLL_ID_SKL_DPLL1;
9965                 break;
9966         case PORT_C:
9967                 id = DPLL_ID_SKL_DPLL2;
9968                 break;
9969         default:
9970                 DRM_ERROR("Incorrect port type\n");
9971                 return;
9972         }
9973
9974         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9975 }
9976
/*
 * Read out which DPLL drives @port on gen9 DDI platforms from
 * DPLL_CTRL2 and record it in pipe_config->shared_dpll.
 */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	/* (port * 3 + 1) is the per-port clock-select field shift in DPLL_CTRL2. */
	id = temp >> (port * 3 + 1);

	/* Only DPLL0..DPLL3 are valid selections on this platform. */
	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9992
/*
 * Read out which PLL drives @port from PORT_CLK_SEL (the pre-gen9 DDI
 * clock mux) and record it in pipe_config->shared_dpll.
 *
 * PORT_CLK_SEL_NONE — and any unknown selection, after a MISSING_CASE
 * warning — leaves shared_dpll untouched.
 */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		MISSING_CASE(ddi_pll_sel);
		/* fall through */
	case PORT_CLK_SEL_NONE:
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10028
/*
 * Determine which CPU transcoder feeds @crtc and whether the pipe is
 * enabled.
 *
 * On success the power reference taken for the transcoder's domain is
 * stored in @wakerefs[domain] and the domain bit is set in
 * *@power_domain_mask; the caller is responsible for releasing them.
 *
 * Returns true if the transcoder's power domain was enabled and
 * PIPECONF reports the pipe as enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask,
				     intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	unsigned long panel_transcoder_mask = 0;
	unsigned long enabled_panel_transcoders = 0;
	enum transcoder panel_transcoder;
	intel_wakeref_t wf;
	u32 tmp;

	/* Gen11+ adds dedicated DSI transcoders to the panel set. */
	if (INTEL_GEN(dev_priv) >= 11)
		panel_transcoder_mask |=
			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	if (HAS_TRANSCODER_EDP(dev_priv))
		panel_transcoder_mask |= BIT(TRANSCODER_EDP);

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * and DSI transcoders handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	for_each_set_bit(panel_transcoder,
			 &panel_transcoder_mask,
			 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
		bool force_thru = false;
		enum pipe trans_pipe;

		tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
			continue;

		/*
		 * Log all enabled ones, only use the first one.
		 *
		 * FIXME: This won't work for two separate DSI displays.
		 */
		enabled_panel_transcoders |= BIT(panel_transcoder);
		if (enabled_panel_transcoders != BIT(panel_transcoder))
			continue;

		/* Map the transcoder's EDP_INPUT selection back to a pipe. */
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to transcoder %s\n",
			     transcoder_name(panel_transcoder));
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
			force_thru = true;
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_pipe = PIPE_C;
			break;
		}

		if (trans_pipe == crtc->pipe) {
			pipe_config->cpu_transcoder = panel_transcoder;
			pipe_config->pch_pfit.force_thru = force_thru;
		}
	}

	/*
	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
	 */
	WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
		enabled_panel_transcoders != BIT(TRANSCODER_EDP));

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wf)
		return false;

	wakerefs[power_domain] = wf;
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
10124
/*
 * BXT DSI readout: check both DSI ports (port A -> TRANSCODER_DSI_A,
 * port C -> TRANSCODER_DSI_C) for one that is enabled and routed to
 * @crtc's pipe. Power references taken along the way are recorded in
 * @wakerefs and *@power_domain_mask for the caller to release.
 *
 * Returns true if a DSI transcoder was found and stored in
 * pipe_config->cpu_transcoder.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask,
					 intel_wakeref_t *wakerefs)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum transcoder cpu_transcoder;
	intel_wakeref_t wf;
	enum port port;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		WARN_ON(*power_domain_mask & BIT_ULL(power_domain));

		wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
		if (!wf)
			continue;

		wakerefs[power_domain] = wf;
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Skip ports whose transcoder feeds a different pipe. */
		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
10179
/*
 * Read out the DDI port state for the crtc's transcoder: which port is
 * driven, which shared DPLL clocks it (including that PLL's HW state),
 * and — on pre-gen9 with port E — the FDI/PCH configuration.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum port port;
	u32 tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	/* Dispatch to the platform-specific PLL readout. */
	if (INTEL_GEN(dev_priv) >= 11)
		icelake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder A, which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
10225
/*
 * Read out the full hardware state of @crtc into @pipe_config.
 *
 * Returns true if the pipe is active. All display power references
 * taken during readout are dropped again before returning.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;

	intel_crtc_init_scalers(crtc, pipe_config);

	/* Bail out early if the pipe's power domain is off. */
	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wf)
		return false;

	wakerefs[power_domain] = wf;
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config,
					  &power_domain_mask, wakerefs);

	/* On BXT/GLK a DSI transcoder may be active instead. */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config,
					 &power_domain_mask, wakerefs)) {
		WARN_ON(active);
		active = true;
	}

	if (!active)
		goto out;

	/* Gen11+ DSI also goes through the DDI/timing readout path. */
	if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
	    INTEL_GEN(dev_priv) >= 11) {
		haswell_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);
	intel_get_crtc_ycbcr_config(crtc, pipe_config);

	pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));

	pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));

	/* Gamma/CSC enable readout differs between gen9+ and older. */
	if (INTEL_GEN(dev_priv) >= 9) {
		u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));

		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
			pipe_config->gamma_enable = true;

		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
			pipe_config->csc_enable = true;
	} else {
		i9xx_get_pipe_color_config(pipe_config);
	}

	intel_color_get_config(pipe_config);

	/* Panel fitter state needs its own power domain reference. */
	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	WARN_ON(power_domain_mask & BIT_ULL(power_domain));

	wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wf) {
		wakerefs[power_domain] = wf;
		power_domain_mask |= BIT_ULL(power_domain);

		if (INTEL_GEN(dev_priv) >= 9)
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	/* Release every power reference taken during readout. */
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv,
					power_domain, wakerefs[power_domain]);

	return active;
}
10329
/*
 * Compute the CURBASE register value for the cursor: the physical bus
 * address on platforms that need physical cursors, the GGTT offset
 * otherwise, plus the color plane surface offset and a 180° rotation
 * adjustment on GMCH platforms.
 */
static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 base;

	if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
		base = obj->phys_handle->busaddr;
	else
		base = intel_plane_ggtt_offset(plane_state);

	base += plane_state->color_plane[0].offset;

	/* ILK+ do this automagically */
	if (HAS_GMCH(dev_priv) &&
	    plane_state->base.rotation & DRM_MODE_ROTATE_180)
		base += (plane_state->base.crtc_h *
			 plane_state->base.crtc_w - 1) * fb->format->cpp[0];

	return base;
}
10353
10354 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
10355 {
10356         int x = plane_state->base.crtc_x;
10357         int y = plane_state->base.crtc_y;
10358         u32 pos = 0;
10359
10360         if (x < 0) {
10361                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10362                 x = -x;
10363         }
10364         pos |= x << CURSOR_X_SHIFT;
10365
10366         if (y < 0) {
10367                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10368                 y = -y;
10369         }
10370         pos |= y << CURSOR_Y_SHIFT;
10371
10372         return pos;
10373 }
10374
10375 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
10376 {
10377         const struct drm_mode_config *config =
10378                 &plane_state->base.plane->dev->mode_config;
10379         int width = plane_state->base.crtc_w;
10380         int height = plane_state->base.crtc_h;
10381
10382         return width > 0 && width <= config->cursor_width &&
10383                 height > 0 && height <= config->cursor_height;
10384 }
10385
/*
 * Compute and validate the cursor surface offset. Cursors cannot be
 * panned within the surface, so after aligning the offset the residual
 * x/y source offsets must both be zero.
 *
 * Returns 0 on success or a negative errno.
 */
static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
{
	int src_x, src_y;
	u32 offset;
	int ret;

	ret = intel_plane_compute_gtt(plane_state);
	if (ret)
		return ret;

	/* Nothing to compute for an invisible cursor. */
	if (!plane_state->base.visible)
		return 0;

	src_x = plane_state->base.src_x >> 16;
	src_y = plane_state->base.src_y >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
						    plane_state, 0);

	if (src_x != 0 || src_y != 0) {
		DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	plane_state->color_plane[0].offset = offset;

	return 0;
}
10415
/*
 * Common cursor plane checks shared by all platforms: linear-only
 * framebuffer, clipping via the atomic plane helper (no scaling
 * allowed), surface offset validation and source coordinate checks.
 *
 * Returns 0 on success or a negative errno.
 */
static int intel_check_cursor(struct intel_crtc_state *crtc_state,
			      struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
						  &crtc_state->base,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	ret = intel_cursor_check_surface(plane_state);
	if (ret)
		return ret;

	/* Remaining checks only apply to a visible cursor. */
	if (!plane_state->base.visible)
		return 0;

	ret = intel_plane_check_src_coordinates(plane_state);
	if (ret)
		return ret;

	return 0;
}
10448
/* Maximum cursor stride on 845g/865g, independent of format/rotation. */
static unsigned int
i845_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return 2048;
}
10456
10457 static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10458 {
10459         u32 cntl = 0;
10460
10461         if (crtc_state->gamma_enable)
10462                 cntl |= CURSOR_GAMMA_ENABLE;
10463
10464         return cntl;
10465 }
10466
10467 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
10468                            const struct intel_plane_state *plane_state)
10469 {
10470         return CURSOR_ENABLE |
10471                 CURSOR_FORMAT_ARGB |
10472                 CURSOR_STRIDE(plane_state->color_plane[0].stride);
10473 }
10474
10475 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
10476 {
10477         int width = plane_state->base.crtc_w;
10478
10479         /*
10480          * 845g/865g are only limited by the width of their cursors,
10481          * the height is arbitrary up to the precision of the register.
10482          */
10483         return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
10484 }
10485
/*
 * Validate the cursor plane state for 845g/865g: common checks,
 * supported dimensions and strides, then precompute the control value.
 *
 * Returns 0 on success or a negative errno.
 */
static int i845_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i845_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	WARN_ON(plane_state->base.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	/* Only these four power-of-two strides are accepted. */
	switch (fb->pitches[0]) {
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
			      fb->pitches[0]);
		return -EINVAL;
	}

	plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);

	return 0;
}
10527
/*
 * Program the 845g/865g cursor registers from @plane_state, or disable
 * the cursor when @plane_state is NULL or not visible (all values 0).
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;

		cntl = plane_state->ctl |
			i845_cursor_ctl_crtc(crtc_state);

		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		/* Disable first, reprogram, then re-enable with new cntl. */
		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
		I915_WRITE_FW(CURBASE(PIPE_A), base);
		I915_WRITE_FW(CURSIZE, size);
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		/* Only the position changed; no disable cycle needed. */
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
10572
/* Disable the 845g/865g cursor by programming an all-zero state. */
static void i845_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i845_update_cursor(plane, crtc_state, NULL);
}
10578
/*
 * Read out whether the 845g/865g cursor is enabled. The cursor
 * registers here are hardcoded to pipe A, so *pipe is always PIPE_A.
 * Returns false if the pipe's power domain is off.
 */
static bool i845_cursor_get_hw_state(struct intel_plane *plane,
				     enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(PIPE_A);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;

	*pipe = PIPE_A;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
10600
/* Max cursor stride on i9xx+: max cursor width times 4 bytes per pixel. */
static unsigned int
i9xx_cursor_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	return plane->base.dev->mode_config.cursor_width * 4;
}
10608
10609 static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10610 {
10611         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
10612         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10613         u32 cntl = 0;
10614
10615         if (INTEL_GEN(dev_priv) >= 11)
10616                 return cntl;
10617
10618         if (crtc_state->gamma_enable)
10619                 cntl = MCURSOR_GAMMA_ENABLE;
10620
10621         if (crtc_state->csc_enable)
10622                 cntl |= MCURSOR_PIPE_CSC_ENABLE;
10623
10624         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
10625                 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
10626
10627         return cntl;
10628 }
10629
/*
 * Compute the plane-state-dependent cursor control bits for i9xx+:
 * trickle feed, the ARGB size mode for the cursor width, and rotation.
 * Returns 0 for unsupported widths (after a MISSING_CASE warning).
 */
static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
			   const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	u32 cntl = 0;

	if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
		cntl |= MCURSOR_TRICKLE_FEED_DISABLE;

	/* Only 64/128/256 pixel wide ARGB cursor modes exist. */
	switch (plane_state->base.crtc_w) {
	case 64:
		cntl |= MCURSOR_MODE_64_ARGB_AX;
		break;
	case 128:
		cntl |= MCURSOR_MODE_128_ARGB_AX;
		break;
	case 256:
		cntl |= MCURSOR_MODE_256_ARGB_AX;
		break;
	default:
		MISSING_CASE(plane_state->base.crtc_w);
		return 0;
	}

	if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
		cntl |= MCURSOR_ROTATE_180;

	return cntl;
}
10660
/*
 * Check whether the requested cursor dimensions are supported on
 * i9xx+: within the global limits, a power-of-two width, and a height
 * constrained by CUR_FBC_CTL availability and rotation.
 */
static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	int width = plane_state->base.crtc_w;
	int height = plane_state->base.crtc_h;

	if (!intel_cursor_size_ok(plane_state))
		return false;

	/* Cursor width is limited to a few power-of-two sizes */
	switch (width) {
	case 256:
	case 128:
	case 64:
		break;
	default:
		return false;
	}

	/*
	 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
	 * height from 8 lines up to the cursor width, when the
	 * cursor is not rotated. Everything else requires square
	 * cursors.
	 */
	if (HAS_CUR_FBC(dev_priv) &&
	    plane_state->base.rotation & DRM_MODE_ROTATE_0) {
		if (height < 8 || height > width)
			return false;
	} else {
		if (height != width)
			return false;
	}

	return true;
}
10698
/*
 * Validate the cursor plane state for i9xx+: common checks, supported
 * dimensions, a stride matching width * cpp, the CHV pipe C erratum,
 * then precompute the control value.
 *
 * Returns 0 on success or a negative errno.
 */
static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	WARN_ON(plane_state->base.visible &&
		plane_state->color_plane[0].stride != fb->pitches[0]);

	/* The stride must exactly match the cursor width in bytes. */
	if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
			      fb->pitches[0], plane_state->base.crtc_w);
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->base.visible && plane_state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}
10753
/*
 * Program the i9xx-style cursor plane registers for @plane.
 *
 * Called with a non-NULL @plane_state to update/enable the cursor, or with
 * @plane_state == NULL (see i9xx_disable_cursor()) to disable it, in which
 * case all registers are written as zero.
 *
 * Register writes happen under the uncore lock so the CNTR/POS/BASE
 * sequence is not interleaved with other MMIO from this device.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	/* All zero == cursor disabled; only filled in when visible below. */
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		cntl = plane_state->ctl |
			i9xx_cursor_ctl_crtc(crtc_state);

		/* Cursor FBC only covers the non-square (h != w) case here. */
		if (plane_state->base.crtc_h != plane_state->base.crtc_w)
			fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
	 * Without the CURCNTR write the CURPOS write would
	 * arm itself. Thus we always update CURCNTR before
	 * CURPOS.
	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
	 * a write to any of the cursor register will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * The other registers are armed by the CURBASE write
	 * except when the plane is getting enabled at which time
	 * the CURCNTR write arms the update.
	 */

	if (INTEL_GEN(dev_priv) >= 9)
		skl_write_cursor_wm(plane, crtc_state);

	/*
	 * Full reprogram only when CNTR/FBC/BASE changed since the last
	 * write (cached in plane->cursor); otherwise the cheap CURPOS +
	 * arming CURBASE pair below suffices.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != fbc_ctl ||
	    plane->cursor.cntl != cntl) {
		if (HAS_CUR_FBC(dev_priv))
			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
		I915_WRITE_FW(CURCNTR(pipe), cntl);
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);

		plane->cursor.base = base;
		plane->cursor.size = fbc_ctl;
		plane->cursor.cntl = cntl;
	} else {
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
10818
/*
 * Disable the cursor plane: i9xx_update_cursor() treats a NULL plane
 * state as "write all cursor registers to zero".
 */
static void i9xx_disable_cursor(struct intel_plane *plane,
				const struct intel_crtc_state *crtc_state)
{
	i9xx_update_cursor(plane, crtc_state, NULL);
}
10824
/*
 * Read back whether the cursor plane is currently enabled in hardware.
 *
 * Returns true if CURCNTR reports an active cursor mode, and stores the
 * pipe the cursor is attached to in *@pipe. *@pipe is written even when
 * the cursor is disabled; it is only meaningful when true is returned.
 * Returns false without touching *@pipe if the power well is off.
 */
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
				     enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-3 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (!wakeref)
		return false;

	val = I915_READ(CURCNTR(plane->pipe));

	/* Any non-zero cursor mode means the plane is enabled. */
	ret = val & MCURSOR_MODE;

	/* Pre-g4x parts encode the bound pipe in the control register. */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		*pipe = plane->pipe;
	else
		*pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
			MCURSOR_PIPE_SELECT_SHIFT;

	intel_display_power_put(dev_priv, power_domain, wakeref);

	return ret;
}
10858
/*
 * VESA 640x480x72Hz mode to set on the pipe.
 * Used as the fallback mode by intel_get_load_detect_pipe() when the
 * caller does not supply one.
 */
static const struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
10864
10865 struct drm_framebuffer *
10866 intel_framebuffer_create(struct drm_i915_gem_object *obj,
10867                          struct drm_mode_fb_cmd2 *mode_cmd)
10868 {
10869         struct intel_framebuffer *intel_fb;
10870         int ret;
10871
10872         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10873         if (!intel_fb)
10874                 return ERR_PTR(-ENOMEM);
10875
10876         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
10877         if (ret)
10878                 goto err;
10879
10880         return &intel_fb->base;
10881
10882 err:
10883         kfree(intel_fb);
10884         return ERR_PTR(ret);
10885 }
10886
10887 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
10888                                         struct drm_crtc *crtc)
10889 {
10890         struct drm_plane *plane;
10891         struct drm_plane_state *plane_state;
10892         int ret, i;
10893
10894         ret = drm_atomic_add_affected_planes(state, crtc);
10895         if (ret)
10896                 return ret;
10897
10898         for_each_new_plane_in_state(state, plane, plane_state, i) {
10899                 if (plane_state->crtc != crtc)
10900                         continue;
10901
10902                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
10903                 if (ret)
10904                         return ret;
10905
10906                 drm_atomic_set_fb_for_plane(plane_state, NULL);
10907         }
10908
10909         return 0;
10910 }
10911
/*
 * Light up a pipe on @connector for load detection.
 *
 * Picks a CRTC (the connector's current one, or the first unused CRTC
 * the encoder can drive), commits a modeset with @mode (or the VESA
 * 640x480 fallback) and all planes disabled, and saves a duplicated
 * state in @old->restore_state for intel_release_load_detect_pipe().
 *
 * Return value is int but effectively tri-state: true on success,
 * false on failure, or -EDEADLK when the caller must back off and
 * retry the whole locking sequence with @ctx.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       const struct drm_display_mode *mode,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		/* Skip CRTCs this encoder cannot drive. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Already in use: drop the lock and keep looking. */
		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	/* Build the load-detect state: connector bound to crtc, mode set. */
	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
	if (ret)
		goto fail;

	ret = intel_modeset_disable_planes(state, crtc);
	if (ret)
		goto fail;

	/*
	 * Snapshot the current connector/crtc/plane state into
	 * restore_state before committing, so it can be re-applied later.
	 */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = drm_atomic_add_affected_planes(restore_state, crtc);
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* -EDEADLK must be propagated so the caller can back off and retry. */
	if (ret == -EDEADLK)
		return ret;

	return false;
}
11069
/*
 * Undo a successful intel_get_load_detect_pipe(): recommit the state
 * that was duplicated into @old->restore_state, then drop our reference
 * to it. A NULL restore_state (load detect never succeeded) is a no-op.
 * Commit failure is only logged; there is no way to recover here.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_atomic_state *state = old->restore_state;
	int ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	if (!state)
		return;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
	if (ret)
		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
	drm_atomic_state_put(state);
}
11092
11093 static int i9xx_pll_refclk(struct drm_device *dev,
11094                            const struct intel_crtc_state *pipe_config)
11095 {
11096         struct drm_i915_private *dev_priv = to_i915(dev);
11097         u32 dpll = pipe_config->dpll_hw_state.dpll;
11098
11099         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
11100                 return dev_priv->vbt.lvds_ssc_freq;
11101         else if (HAS_PCH_SPLIT(dev_priv))
11102                 return 120000;
11103         else if (!IS_GEN(dev_priv, 2))
11104                 return 96000;
11105         else
11106                 return 48000;
11107 }
11108
/*
 * Returns the clock of the currently programmed mode of the given pipe.
 *
 * Decodes the DPLL/FP register values captured in
 * pipe_config->dpll_hw_state back into m/n/p divisors and stores the
 * resulting clock in pipe_config->port_clock. On an unrecognized DPLL
 * mode it logs and returns with port_clock untouched.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick whichever FP register the DPLL is currently using. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		/* Pineview encodes N as a one-hot bit; ffs()-1 recovers it. */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN(dev_priv, 2)) {
		/* P1 is stored as a one-hot bitfield on gen3+. */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2: LVDS presence changes how P1/P2 are decoded. */
		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
11198
/*
 * Compute the pixel (dot) clock from the link frequency and the M/N
 * values. Returns 0 if link_n is zero (link M/N not programmed).
 */
int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precison if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the link clock is simpler:
	 * link_clock = (link_m * link_freq) / link_n
	 */

	if (!m_n->link_n)
		return 0;

	/* 64-bit intermediate avoids overflow of link_m * link_freq. */
	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
}
11217
/*
 * Fill in port_clock and adjusted_mode.crtc_clock for a PCH-driven pipe:
 * port_clock from the DPLL readout, crtc_clock from the FDI M/N values.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}
11235
11236 /* Returns the currently programmed mode of the given encoder. */
11237 struct drm_display_mode *
11238 intel_encoder_current_mode(struct intel_encoder *encoder)
11239 {
11240         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
11241         struct intel_crtc_state *crtc_state;
11242         struct drm_display_mode *mode;
11243         struct intel_crtc *crtc;
11244         enum pipe pipe;
11245
11246         if (!encoder->get_hw_state(encoder, &pipe))
11247                 return NULL;
11248
11249         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11250
11251         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
11252         if (!mode)
11253                 return NULL;
11254
11255         crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
11256         if (!crtc_state) {
11257                 kfree(mode);
11258                 return NULL;
11259         }
11260
11261         crtc_state->base.crtc = &crtc->base;
11262
11263         if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
11264                 kfree(crtc_state);
11265                 kfree(mode);
11266                 return NULL;
11267         }
11268
11269         encoder->get_config(encoder, crtc_state);
11270
11271         intel_mode_from_pipe_config(mode, crtc_state);
11272
11273         kfree(crtc_state);
11274
11275         return mode;
11276 }
11277
/* drm_crtc_funcs.destroy: tear down the DRM core state and free the crtc. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(intel_crtc);
}
11285
11286 /**
11287  * intel_wm_need_update - Check whether watermarks need updating
11288  * @cur: current plane state
11289  * @new: new plane state
11290  *
11291  * Check current plane state versus the new one to determine whether
11292  * watermarks need to be recalculated.
11293  *
11294  * Returns true or false.
11295  */
11296 static bool intel_wm_need_update(const struct intel_plane_state *cur,
11297                                  struct intel_plane_state *new)
11298 {
11299         /* Update watermarks on tiling or size changes. */
11300         if (new->base.visible != cur->base.visible)
11301                 return true;
11302
11303         if (!cur->base.fb || !new->base.fb)
11304                 return false;
11305
11306         if (cur->base.fb->modifier != new->base.fb->modifier ||
11307             cur->base.rotation != new->base.rotation ||
11308             drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
11309             drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
11310             drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
11311             drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
11312                 return true;
11313
11314         return false;
11315 }
11316
11317 static bool needs_scaling(const struct intel_plane_state *state)
11318 {
11319         int src_w = drm_rect_width(&state->base.src) >> 16;
11320         int src_h = drm_rect_height(&state->base.src) >> 16;
11321         int dst_w = drm_rect_width(&state->base.dst);
11322         int dst_h = drm_rect_height(&state->base.dst);
11323
11324         return (src_w != dst_w || src_h != dst_h);
11325 }
11326
/*
 * Compute per-plane derived state for an atomic check: scaler usage,
 * visibility transitions (turn on/off), and the resulting crtc-level
 * flags (update_wm_pre/post, disable_cxsr, disable_lp_wm, fb_bits,
 * fb_changed, active_planes, data_rate).
 *
 * Returns 0 on success or a negative error code from the scaler setup.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
				    struct intel_crtc_state *crtc_state,
				    const struct intel_plane_state *old_plane_state,
				    struct intel_plane_state *plane_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = old_crtc_state->base.active;
	bool is_crtc_enabled = crtc_state->base.active;
	bool turn_off, turn_on, visible, was_visible;
	struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	/* Gen9+ non-cursor planes may need a pipe scaler allocated/freed. */
	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(crtc_state, plane_state);
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->base.visible;
	visible = plane_state->base.visible;

	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		plane_state->base.visible = visible = false;
		crtc_state->active_planes &= ~BIT(plane->id);
		crtc_state->data_rate[plane->id] = 0;
	}

	/* Invisible before and after: nothing more to compute. */
	if (!was_visible && !visible)
		return 0;

	if (fb != old_plane_state->base.fb)
		crtc_state->fb_changed = true;

	/* A full modeset counts as both turning the plane off and on. */
	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
			 crtc->base.base.id, crtc->base.name,
			 plane->base.base.id, plane->base.name,
			 fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.base.id, plane->base.name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			crtc_state->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			crtc_state->disable_cxsr = true;
	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			crtc_state->update_wm_pre = true;
			crtc_state->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		crtc_state->fb_bits |= plane->frontbuffer_bit;

	/*
	 * ILK/SNB DVSACNTR/Sprite Enable
	 * IVB SPR_CTL/Sprite Enable
	 * "When in Self Refresh Big FIFO mode, a write to enable the
	 *  plane will be internally buffered and delayed while Big FIFO
	 *  mode is exiting."
	 *
	 * Which means that enabling the sprite can take an extra frame
	 * when we start in big FIFO mode (LP1+). Thus we need to drop
	 * down to LP0 and wait for vblank in order to make sure the
	 * sprite gets enabled on the next vblank after the register write.
	 * Doing otherwise would risk enabling the sprite one frame after
	 * we've already signalled flip completion. We can resume LP1+
	 * once the sprite has been enabled.
	 *
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 * IVB SPR_SCALE/Scaling Enable
	 * "Low Power watermarks must be disabled for at least one
	 *  frame before enabling sprite scaling, and kept disabled
	 *  until sprite scaling is disabled."
	 *
	 * ILK/SNB DVSASCALE/Scaling Enable
	 * "When in Self Refresh Big FIFO mode, scaling enable will be
	 *  masked off while Big FIFO mode is exiting."
	 *
	 * Despite the w/a only being listed for IVB we assume that
	 * the ILK/SNB note has similar ramifications, hence we apply
	 * the w/a on all three platforms.
	 *
	 * With experimental results seems this is needed also for primary
	 * plane, not only sprite plane.
	 */
	if (plane->id != PLANE_CURSOR &&
	    (IS_GEN_RANGE(dev_priv, 5, 6) ||
	     IS_IVYBRIDGE(dev_priv)) &&
	    (turn_on || (!needs_scaling(old_plane_state) &&
			 needs_scaling(plane_state))))
		crtc_state->disable_lp_wm = true;

	return 0;
}
11456
11457 static bool encoders_cloneable(const struct intel_encoder *a,
11458                                const struct intel_encoder *b)
11459 {
11460         /* masks could be asymmetric, so check both ways */
11461         return a == b || (a->cloneable & (1 << b->type) &&
11462                           b->cloneable & (1 << a->type));
11463 }
11464
11465 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11466                                          struct intel_crtc *crtc,
11467                                          struct intel_encoder *encoder)
11468 {
11469         struct intel_encoder *source_encoder;
11470         struct drm_connector *connector;
11471         struct drm_connector_state *connector_state;
11472         int i;
11473
11474         for_each_new_connector_in_state(state, connector, connector_state, i) {
11475                 if (connector_state->crtc != &crtc->base)
11476                         continue;
11477
11478                 source_encoder =
11479                         to_intel_encoder(connector_state->best_encoder);
11480                 if (!encoders_cloneable(encoder, source_encoder))
11481                         return false;
11482         }
11483
11484         return true;
11485 }
11486
/*
 * Pull the slave/master counterpart of every linked (planar YUV) plane
 * into the atomic state, so both halves of each link get reprogrammed
 * together. Also sanity-checks that the links are mutual and that one
 * end is master and the other slave.
 *
 * Returns 0 on success or the error from acquiring a plane state.
 */
static int icl_add_linked_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state, *linked_plane_state;
	int i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		linked = plane_state->linked_plane;

		if (!linked)
			continue;

		linked_plane_state = intel_atomic_get_plane_state(state, linked);
		if (IS_ERR(linked_plane_state))
			return PTR_ERR(linked_plane_state);

		/* Links must be symmetric... */
		WARN_ON(linked_plane_state->linked_plane != plane);
		/* ...with exactly one master and one slave per pair. */
		WARN_ON(linked_plane_state->slave == plane_state->slave);
	}

	return 0;
}
11509
/*
 * On gen11+ planar YUV (NV12) scanout uses two planes: the main plane
 * plus a linked Y-capable "slave" plane. Rebuild those links for this
 * CRTC: first drop all stale links, then pair every plane in
 * crtc_state->nv12_planes with a currently unused Y-capable plane.
 *
 * Returns 0 on success, an error from acquiring a plane's state, or
 * -EINVAL when not enough free Y planes are available.
 */
static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
	struct intel_plane *plane, *linked;
	struct intel_plane_state *plane_state;
	int i;

	/* Plane linking only exists on gen11+. */
	if (INTEL_GEN(dev_priv) < 11)
		return 0;

	/*
	 * Destroy all old plane links and make the slave plane invisible
	 * in the crtc_state->active_planes mask.
	 */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
			continue;

		plane_state->linked_plane = NULL;
		if (plane_state->slave && !plane_state->base.visible) {
			/* An invisible slave plane no longer counts as active. */
			crtc_state->active_planes &= ~BIT(plane->id);
			crtc_state->update_planes |= BIT(plane->id);
		}

		plane_state->slave = false;
	}

	/* No NV12 planes in use: nothing to link. */
	if (!crtc_state->nv12_planes)
		return 0;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *linked_state = NULL;

		if (plane->pipe != crtc->pipe ||
		    !(crtc_state->nv12_planes & BIT(plane->id)))
			continue;

		/* Grab the first Y-capable plane that is not already active. */
		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
			if (!icl_is_nv12_y_plane(linked->id))
				continue;

			if (crtc_state->active_planes & BIT(linked->id))
				continue;

			linked_state = intel_atomic_get_plane_state(state, linked);
			if (IS_ERR(linked_state))
				return PTR_ERR(linked_state);

			break;
		}

		if (!linked_state) {
			DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
				      hweight8(crtc_state->nv12_planes));

			return -EINVAL;
		}

		plane_state->linked_plane = linked;

		linked_state->slave = true;
		linked_state->linked_plane = plane;
		crtc_state->active_planes |= BIT(linked->id);
		crtc_state->update_planes |= BIT(linked->id);
		DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
	}

	return 0;
}
11581
11582 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
11583 {
11584         struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
11585         struct intel_atomic_state *state =
11586                 to_intel_atomic_state(new_crtc_state->base.state);
11587         const struct intel_crtc_state *old_crtc_state =
11588                 intel_atomic_get_old_crtc_state(state, crtc);
11589
11590         return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
11591 }
11592
/*
 * Validate and fill in the derived parts of a CRTC's new atomic state:
 * clocks, color management, watermarks, scalers and IPS.
 *
 * Returns 0 on success or a negative error code when the requested
 * configuration cannot be supported.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	int ret;
	bool mode_changed = needs_modeset(pipe_config);

	/* Pre-gen5 (except G4X): disabling the pipe needs a post-modeset
	 * watermark update. */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
	    mode_changed && !crtc_state->active)
		pipe_config->update_wm_post = true;

	/* Modeset on an enabled CRTC: recompute clock state. A shared DPLL
	 * must not already be assigned at this point. */
	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	/*
	 * May need to update pipe gamma enable bits
	 * when C8 planes are getting enabled/disabled.
	 */
	if (c8_planes_changed(pipe_config))
		crtc_state->color_mgmt_changed = true;

	if (mode_changed || pipe_config->update_pipe ||
	    crtc_state->color_mgmt_changed) {
		ret = intel_color_check(pipe_config);
		if (ret)
			return ret;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm) {
		/* Intermediate watermarks only make sense when the target
		 * watermarks were computed above. */
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	}

	/* Gen9+: scaler and NV12 plane-link bookkeeping. */
	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed || pipe_config->update_pipe)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = icl_check_nv12_planes(pipe_config);
		if (!ret)
			ret = skl_check_pipe_max_pixel_rate(intel_crtc,
							    pipe_config);
		if (!ret)
			ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
							 pipe_config);
	}

	if (HAS_IPS(dev_priv))
		pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);

	return ret;
}
11674
/* CRTC helper vtable: atomic_check entry point for intel CRTCs. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.atomic_check = intel_crtc_atomic_check,
};
11678
/*
 * Bring every connector's atomic state back in sync with the current
 * connector->encoder link, fixing up best_encoder/crtc and the
 * connector reference the state holds.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* Drop the reference held for the previous crtc binding. */
		if (connector->base.state->crtc)
			drm_connector_put(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* Bound to a crtc again: take a new reference. */
			drm_connector_get(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}
11703
/*
 * Clamp pipe_config->pipe_bpp to what the sink can accept, derived
 * from the connector state's max_bpc.
 *
 * Returns 0 on success, -EINVAL for an out-of-range max_bpc.
 */
static int
compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
		      struct intel_crtc_state *pipe_config)
{
	struct drm_connector *connector = conn_state->connector;
	const struct drm_display_info *info = &connector->display_info;
	int bpp;

	/* Round max_bpc down to a supported per-component depth (x3 components). */
	switch (conn_state->max_bpc) {
	case 6 ... 7:
		bpp = 6 * 3;
		break;
	case 8 ... 9:
		bpp = 8 * 3;
		break;
	case 10 ... 11:
		bpp = 10 * 3;
		break;
	case 12:
		bpp = 12 * 3;
		break;
	default:
		return -EINVAL;
	}

	if (bpp < pipe_config->pipe_bpp) {
		/*
		 * NOTE(review): the message prints max_requested_bpc while
		 * the clamp above used max_bpc — presumably max_bpc is
		 * already derived from the requested value; confirm against
		 * the connector property code.
		 */
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
			      "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
			      connector->base.id, connector->name,
			      bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
			      pipe_config->pipe_bpp);

		pipe_config->pipe_bpp = bpp;
	}

	return 0;
}
11741
/*
 * Seed pipe_config->pipe_bpp with the platform maximum and then clamp
 * it to what every connector on @crtc can accept.
 *
 * Returns 0 on success or an error from compute_sink_pipe_bpp().
 */
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_atomic_state *state = pipe_config->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int bpp, i;

	/* Platform max: 30bpp on G4X/VLV/CHV, 36bpp on gen5+, 24bpp otherwise. */
	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)))
		bpp = 10*3;
	else if (INTEL_GEN(dev_priv) >= 5)
		bpp = 12*3;
	else
		bpp = 8*3;

	pipe_config->pipe_bpp = bpp;

	/* Clamp display bpp to connector max bpp */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		int ret;

		if (connector_state->crtc != &crtc->base)
			continue;

		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
		if (ret)
			return ret;
	}

	return 0;
}
11776
/* Dump the hardware (crtc_*) timings of @mode to the KMS debug log. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal,
		      mode->type, mode->flags);
}
11788
/* Dump one M/N link configuration, tagged with @id (e.g. "fdi", "dp m_n"). */
static inline void
intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
		      const char *id, unsigned int lane_count,
		      const struct intel_link_m_n *m_n)
{
	DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      id, lane_count,
		      m_n->gmch_m, m_n->gmch_n,
		      m_n->link_m, m_n->link_n, m_n->tu);
}
11799
11800 static void
11801 intel_dump_infoframe(struct drm_i915_private *dev_priv,
11802                      const union hdmi_infoframe *frame)
11803 {
11804         if ((drm_debug & DRM_UT_KMS) == 0)
11805                 return;
11806
11807         hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
11808 }
11809
/* OUTPUT_TYPE(FOO) expands to [INTEL_OUTPUT_FOO] = "FOO". */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/* Human readable names for the INTEL_OUTPUT_* types, indexed by type. */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
11828
11829 static void snprintf_output_types(char *buf, size_t len,
11830                                   unsigned int output_types)
11831 {
11832         char *str = buf;
11833         int i;
11834
11835         str[0] = '\0';
11836
11837         for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
11838                 int r;
11839
11840                 if ((output_types & BIT(i)) == 0)
11841                         continue;
11842
11843                 r = snprintf(str, len, "%s%s",
11844                              str != buf ? "," : "", output_type_str[i]);
11845                 if (r >= len)
11846                         break;
11847                 str += r;
11848                 len -= r;
11849
11850                 output_types &= ~BIT(i);
11851         }
11852
11853         WARN_ON_ONCE(output_types != 0);
11854 }
11855
/* Human readable names for enum intel_output_format, indexed by value. */
static const char * const output_format_str[] = {
	[INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
};
11862
11863 static const char *output_formats(enum intel_output_format format)
11864 {
11865         if (format >= ARRAY_SIZE(output_format_str))
11866                 format = INTEL_OUTPUT_FORMAT_INVALID;
11867         return output_format_str[format];
11868 }
11869
/*
 * Dump a plane's framebuffer, rotation, scaler and (when visible)
 * src/dst rectangles to the KMS debug log.
 */
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_format_name_buf format_name;

	/* No fb attached: short form, nothing else to report. */
	if (!fb) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
			      plane->base.base.id, plane->base.name,
			      yesno(plane_state->base.visible));
		return;
	}

	DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
		      plane->base.base.id, plane->base.name,
		      fb->base.id, fb->width, fb->height,
		      drm_get_format_name(fb->format->format, &format_name),
		      yesno(plane_state->base.visible));
	DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
		      plane_state->base.rotation, plane_state->scaler_id);
	if (plane_state->base.visible)
		DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
			      DRM_RECT_FP_ARG(&plane_state->base.src),
			      DRM_RECT_ARG(&plane_state->base.dst));
}
11895
/*
 * Dump a CRTC's configuration (and, when @state is given, the state of
 * every plane on its pipe) to the KMS debug log. @context tags the
 * output with the caller's purpose.
 */
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
				   struct intel_atomic_state *state,
				   const char *context)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	char buf[64];
	int i;

	DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
		      crtc->base.base.id, crtc->base.name,
		      yesno(pipe_config->base.enable), context);

	/* A disabled pipe has no config worth dumping beyond its planes. */
	if (!pipe_config->base.enable)
		goto dump_planes;

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
		      yesno(pipe_config->base.active),
		      buf, pipe_config->output_types,
		      output_formats(pipe_config->output_format));

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* m2_n2 is the second set of link M/N values used with DRRS. */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
		      pipe_config->has_audio, pipe_config->has_infoframe,
		      pipe_config->infoframes.enable);

	/* Dump each infoframe type that is flagged enabled. */
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
		DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
	if (pipe_config->infoframes.enable &
	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
			      pipe_config->scaler_state.scaler_id);

	/* GMCH platforms have the panel fitter on the pipe; others on the PCH. */
	if (HAS_GMCH(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled),
			      yesno(pipe_config->pch_pfit.force_thru));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

dump_planes:
	if (!state)
		return;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane->pipe == crtc->pipe)
			intel_dump_plane_state(plane_state);
	}
}
11997
/*
 * Verify that no digital port is claimed by more than one encoder and
 * that MST and SST/HDMI streams are not mixed on the same port.
 *
 * Returns true when the configuration has no port conflicts.
 */
static bool check_digital_port_conflicts(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the new state; fall back to the current one. */
		connector_state =
			drm_atomic_get_new_connector_state(&state->base,
							   connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_DDI:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* else: fall through */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << encoder->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				ret = false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
12063
/*
 * Clear the intel-specific extension of @crtc_state so it can be
 * recomputed from scratch, keeping the base drm_crtc_state plus the
 * few fields (scalers, DPLL, CRC, GMCH watermarks) carried over.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc_state *saved_state;

	/* Zeroed scratch state used as the template for the memcpy below. */
	saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
	if (!saved_state)
		return -ENOMEM;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	saved_state->scaler_state = crtc_state->scaler_state;
	saved_state->shared_dpll = crtc_state->shared_dpll;
	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
	saved_state->crc_enabled = crtc_state->crc_enabled;
	/* GMCH-type platforms keep their watermark state across the clear. */
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		saved_state->wm = crtc_state->wm;

	/* Keep base drm_crtc_state intact, only clear our extended struct */
	BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
	memcpy(&crtc_state->base + 1, &saved_state->base + 1,
	       sizeof(*crtc_state) - sizeof(crtc_state->base));

	kfree(saved_state);
	return 0;
}
12096
/*
 * Compute the complete pipe configuration for @pipe_config's CRTC:
 * reset stale state, pick the baseline pipe bpp, derive the pipe
 * source size, let every encoder on the CRTC adjust the mode and
 * finally fix up the CRTC config, retrying the encoder pass once when
 * the CRTC fixup asks for a bandwidth-constrained retry.
 *
 * Returns 0 on success, -EDEADLK for atomic lock backoff, or another
 * negative error code.
 */
static int
intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret;
	int i;
	bool retry = true;

	ret = clear_intel_crtc_state(pipe_config);
	if (ret)
		return ret;

	/* Default to the transcoder with the same index as the pipe. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					pipe_config);
	if (ret)
		return ret;

	/* Remembered only for the final debug message below. */
	base_bpp = pipe_config->pipe_bpp;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			return -EINVAL;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		ret = encoder->compute_config(encoder, pipe_config,
					      connector_state);
		if (ret < 0) {
			/* -EDEADLK is expected during atomic backoff: don't log it. */
			if (ret != -EDEADLK)
				DRM_DEBUG_KMS("Encoder config failure: %d\n",
					      ret);
			return ret;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret == -EDEADLK)
		return ret;
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		return ret;
	}

	if (ret == RETRY) {
		/* Only one retry is allowed, to avoid looping forever. */
		if (WARN(!retry, "loop in pipe configuration computation\n"))
			return -EINVAL;

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return 0;
}
12233
/*
 * Compare two clocks with roughly 5% tolerance, returning true when
 * they are close enough to count as the same frequency. A zero clock
 * only matches another zero clock.
 */
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int sum, delta;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);
	sum = clock1 + clock2;

	/*
	 * (delta + sum) * 100 / sum == 100 + delta * 100 / sum, so this
	 * accepts a relative difference of just under 5% of the sum.
	 */
	return (delta + sum) * 100 / sum < 105;
}
12251
12252 static bool
12253 intel_compare_m_n(unsigned int m, unsigned int n,
12254                   unsigned int m2, unsigned int n2,
12255                   bool exact)
12256 {
12257         if (m == m2 && n == n2)
12258                 return true;
12259
12260         if (exact || !m || !n || !m2 || !n2)
12261                 return false;
12262
12263         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12264
12265         if (n > n2) {
12266                 while (n > n2) {
12267                         m2 <<= 1;
12268                         n2 <<= 1;
12269                 }
12270         } else if (n < n2) {
12271                 while (n < n2) {
12272                         m <<= 1;
12273                         n <<= 1;
12274                 }
12275         }
12276
12277         if (n != n2)
12278                 return false;
12279
12280         return intel_fuzzy_clock_check(m, m2);
12281 }
12282
12283 static bool
12284 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12285                        const struct intel_link_m_n *m2_n2,
12286                        bool exact)
12287 {
12288         return m_n->tu == m2_n2->tu &&
12289                 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12290                                   m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
12291                 intel_compare_m_n(m_n->link_m, m_n->link_n,
12292                                   m2_n2->link_m, m2_n2->link_n, exact);
12293 }
12294
12295 static bool
12296 intel_compare_infoframe(const union hdmi_infoframe *a,
12297                         const union hdmi_infoframe *b)
12298 {
12299         return memcmp(a, b, sizeof(*a)) == 0;
12300 }
12301
12302 static void
12303 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
12304                                bool fastset, const char *name,
12305                                const union hdmi_infoframe *a,
12306                                const union hdmi_infoframe *b)
12307 {
12308         if (fastset) {
12309                 if ((drm_debug & DRM_UT_KMS) == 0)
12310                         return;
12311
12312                 drm_dbg(DRM_UT_KMS, "fastset mismatch in %s infoframe", name);
12313                 drm_dbg(DRM_UT_KMS, "expected:");
12314                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
12315                 drm_dbg(DRM_UT_KMS, "found");
12316                 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
12317         } else {
12318                 drm_err("mismatch in %s infoframe", name);
12319                 drm_err("expected:");
12320                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
12321                 drm_err("found");
12322                 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
12323         }
12324 }
12325
/*
 * Log a pipe config mismatch for the field @name, with a printf-style
 * detail message.  Fastset mismatches merely force a modeset and are
 * logged at KMS debug level; otherwise the mismatch indicates a driver
 * bug and is logged as an error.
 */
static void __printf(3, 4)
pipe_config_mismatch(bool fastset, const char *name, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	/* %pV expands the caller's format/args via the va_format wrapper */
	if (fastset)
		drm_dbg(DRM_UT_KMS, "fastset mismatch in %s %pV", name, &vaf);
	else
		drm_err("mismatch in %s %pV", name, &vaf);

	va_end(args);
}
12343
12344 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
12345 {
12346         if (i915_modparams.fastboot != -1)
12347                 return i915_modparams.fastboot;
12348
12349         /* Enable fastboot by default on Skylake and newer */
12350         if (INTEL_GEN(dev_priv) >= 9)
12351                 return true;
12352
12353         /* Enable fastboot by default on VLV and CHV */
12354         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12355                 return true;
12356
12357         /* Disabled by default on all others */
12358         return false;
12359 }
12360
12361 static bool
12362 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
12363                           const struct intel_crtc_state *pipe_config,
12364                           bool fastset)
12365 {
12366         struct drm_i915_private *dev_priv = to_i915(current_config->base.crtc->dev);
12367         bool ret = true;
12368         bool fixup_inherited = fastset &&
12369                 (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
12370                 !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
12371
12372         if (fixup_inherited && !fastboot_enabled(dev_priv)) {
12373                 DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
12374                 ret = false;
12375         }
12376
12377 #define PIPE_CONF_CHECK_X(name) do { \
12378         if (current_config->name != pipe_config->name) { \
12379                 pipe_config_mismatch(fastset, __stringify(name), \
12380                                      "(expected 0x%08x, found 0x%08x)\n", \
12381                                      current_config->name, \
12382                                      pipe_config->name); \
12383                 ret = false; \
12384         } \
12385 } while (0)
12386
12387 #define PIPE_CONF_CHECK_I(name) do { \
12388         if (current_config->name != pipe_config->name) { \
12389                 pipe_config_mismatch(fastset, __stringify(name), \
12390                                      "(expected %i, found %i)\n", \
12391                                      current_config->name, \
12392                                      pipe_config->name); \
12393                 ret = false; \
12394         } \
12395 } while (0)
12396
12397 #define PIPE_CONF_CHECK_BOOL(name) do { \
12398         if (current_config->name != pipe_config->name) { \
12399                 pipe_config_mismatch(fastset, __stringify(name), \
12400                                      "(expected %s, found %s)\n", \
12401                                      yesno(current_config->name), \
12402                                      yesno(pipe_config->name)); \
12403                 ret = false; \
12404         } \
12405 } while (0)
12406
12407 /*
12408  * Checks state where we only read out the enabling, but not the entire
12409  * state itself (like full infoframes or ELD for audio). These states
12410  * require a full modeset on bootup to fix up.
12411  */
12412 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
12413         if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
12414                 PIPE_CONF_CHECK_BOOL(name); \
12415         } else { \
12416                 pipe_config_mismatch(fastset, __stringify(name), \
12417                                      "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
12418                                      yesno(current_config->name), \
12419                                      yesno(pipe_config->name)); \
12420                 ret = false; \
12421         } \
12422 } while (0)
12423
12424 #define PIPE_CONF_CHECK_P(name) do { \
12425         if (current_config->name != pipe_config->name) { \
12426                 pipe_config_mismatch(fastset, __stringify(name), \
12427                                      "(expected %p, found %p)\n", \
12428                                      current_config->name, \
12429                                      pipe_config->name); \
12430                 ret = false; \
12431         } \
12432 } while (0)
12433
12434 #define PIPE_CONF_CHECK_M_N(name) do { \
12435         if (!intel_compare_link_m_n(&current_config->name, \
12436                                     &pipe_config->name,\
12437                                     !fastset)) { \
12438                 pipe_config_mismatch(fastset, __stringify(name), \
12439                                      "(expected tu %i gmch %i/%i link %i/%i, " \
12440                                      "found tu %i, gmch %i/%i link %i/%i)\n", \
12441                                      current_config->name.tu, \
12442                                      current_config->name.gmch_m, \
12443                                      current_config->name.gmch_n, \
12444                                      current_config->name.link_m, \
12445                                      current_config->name.link_n, \
12446                                      pipe_config->name.tu, \
12447                                      pipe_config->name.gmch_m, \
12448                                      pipe_config->name.gmch_n, \
12449                                      pipe_config->name.link_m, \
12450                                      pipe_config->name.link_n); \
12451                 ret = false; \
12452         } \
12453 } while (0)
12454
12455 /* This is required for BDW+ where there is only one set of registers for
12456  * switching between high and low RR.
12457  * This macro can be used whenever a comparison has to be made between one
12458  * hw state and multiple sw state variables.
12459  */
12460 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
12461         if (!intel_compare_link_m_n(&current_config->name, \
12462                                     &pipe_config->name, !fastset) && \
12463             !intel_compare_link_m_n(&current_config->alt_name, \
12464                                     &pipe_config->name, !fastset)) { \
12465                 pipe_config_mismatch(fastset, __stringify(name), \
12466                                      "(expected tu %i gmch %i/%i link %i/%i, " \
12467                                      "or tu %i gmch %i/%i link %i/%i, " \
12468                                      "found tu %i, gmch %i/%i link %i/%i)\n", \
12469                                      current_config->name.tu, \
12470                                      current_config->name.gmch_m, \
12471                                      current_config->name.gmch_n, \
12472                                      current_config->name.link_m, \
12473                                      current_config->name.link_n, \
12474                                      current_config->alt_name.tu, \
12475                                      current_config->alt_name.gmch_m, \
12476                                      current_config->alt_name.gmch_n, \
12477                                      current_config->alt_name.link_m, \
12478                                      current_config->alt_name.link_n, \
12479                                      pipe_config->name.tu, \
12480                                      pipe_config->name.gmch_m, \
12481                                      pipe_config->name.gmch_n, \
12482                                      pipe_config->name.link_m, \
12483                                      pipe_config->name.link_n); \
12484                 ret = false; \
12485         } \
12486 } while (0)
12487
12488 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
12489         if ((current_config->name ^ pipe_config->name) & (mask)) { \
12490                 pipe_config_mismatch(fastset, __stringify(name), \
12491                                      "(%x) (expected %i, found %i)\n", \
12492                                      (mask), \
12493                                      current_config->name & (mask), \
12494                                      pipe_config->name & (mask)); \
12495                 ret = false; \
12496         } \
12497 } while (0)
12498
12499 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
12500         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
12501                 pipe_config_mismatch(fastset, __stringify(name), \
12502                                      "(expected %i, found %i)\n", \
12503                                      current_config->name, \
12504                                      pipe_config->name); \
12505                 ret = false; \
12506         } \
12507 } while (0)
12508
12509 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
12510         if (!intel_compare_infoframe(&current_config->infoframes.name, \
12511                                      &pipe_config->infoframes.name)) { \
12512                 pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
12513                                                &current_config->infoframes.name, \
12514                                                &pipe_config->infoframes.name); \
12515                 ret = false; \
12516         } \
12517 } while (0)
12518
12519 #define PIPE_CONF_QUIRK(quirk) \
12520         ((current_config->quirks | pipe_config->quirks) & (quirk))
12521
12522         PIPE_CONF_CHECK_I(cpu_transcoder);
12523
12524         PIPE_CONF_CHECK_BOOL(has_pch_encoder);
12525         PIPE_CONF_CHECK_I(fdi_lanes);
12526         PIPE_CONF_CHECK_M_N(fdi_m_n);
12527
12528         PIPE_CONF_CHECK_I(lane_count);
12529         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
12530
12531         if (INTEL_GEN(dev_priv) < 8) {
12532                 PIPE_CONF_CHECK_M_N(dp_m_n);
12533
12534                 if (current_config->has_drrs)
12535                         PIPE_CONF_CHECK_M_N(dp_m2_n2);
12536         } else
12537                 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
12538
12539         PIPE_CONF_CHECK_X(output_types);
12540
12541         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12542         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12543         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12544         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12545         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12546         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
12547
12548         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12549         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12550         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12551         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12552         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12553         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
12554
12555         PIPE_CONF_CHECK_I(pixel_multiplier);
12556         PIPE_CONF_CHECK_I(output_format);
12557         PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
12558         if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
12559             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12560                 PIPE_CONF_CHECK_BOOL(limited_color_range);
12561
12562         PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
12563         PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
12564         PIPE_CONF_CHECK_BOOL(has_infoframe);
12565
12566         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
12567
12568         PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12569                               DRM_MODE_FLAG_INTERLACE);
12570
12571         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
12572                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12573                                       DRM_MODE_FLAG_PHSYNC);
12574                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12575                                       DRM_MODE_FLAG_NHSYNC);
12576                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12577                                       DRM_MODE_FLAG_PVSYNC);
12578                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12579                                       DRM_MODE_FLAG_NVSYNC);
12580         }
12581
12582         PIPE_CONF_CHECK_X(gmch_pfit.control);
12583         /* pfit ratios are autocomputed by the hw on gen4+ */
12584         if (INTEL_GEN(dev_priv) < 4)
12585                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
12586         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12587
12588         /*
12589          * Changing the EDP transcoder input mux
12590          * (A_ONOFF vs. A_ON) requires a full modeset.
12591          */
12592         PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
12593
12594         if (!fastset) {
12595                 PIPE_CONF_CHECK_I(pipe_src_w);
12596                 PIPE_CONF_CHECK_I(pipe_src_h);
12597
12598                 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
12599                 if (current_config->pch_pfit.enabled) {
12600                         PIPE_CONF_CHECK_X(pch_pfit.pos);
12601                         PIPE_CONF_CHECK_X(pch_pfit.size);
12602                 }
12603
12604                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12605                 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
12606
12607                 PIPE_CONF_CHECK_X(gamma_mode);
12608                 if (IS_CHERRYVIEW(dev_priv))
12609                         PIPE_CONF_CHECK_X(cgm_mode);
12610                 else
12611                         PIPE_CONF_CHECK_X(csc_mode);
12612                 PIPE_CONF_CHECK_BOOL(gamma_enable);
12613                 PIPE_CONF_CHECK_BOOL(csc_enable);
12614         }
12615
12616         PIPE_CONF_CHECK_BOOL(double_wide);
12617
12618         PIPE_CONF_CHECK_P(shared_dpll);
12619         PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12620         PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12621         PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12622         PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
12623         PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
12624         PIPE_CONF_CHECK_X(dpll_hw_state.spll);
12625         PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12626         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12627         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
12628         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
12629         PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
12630         PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
12631         PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
12632         PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
12633         PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
12634         PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
12635         PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
12636         PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
12637         PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
12638         PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
12639         PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
12640         PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
12641         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
12642         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
12643         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
12644         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
12645         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
12646         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
12647         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
12648         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
12649         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
12650
12651         PIPE_CONF_CHECK_X(dsi_pll.ctrl);
12652         PIPE_CONF_CHECK_X(dsi_pll.div);
12653
12654         if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
12655                 PIPE_CONF_CHECK_I(pipe_bpp);
12656
12657         PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12658         PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
12659
12660         PIPE_CONF_CHECK_I(min_voltage_level);
12661
12662         PIPE_CONF_CHECK_X(infoframes.enable);
12663         PIPE_CONF_CHECK_X(infoframes.gcp);
12664         PIPE_CONF_CHECK_INFOFRAME(avi);
12665         PIPE_CONF_CHECK_INFOFRAME(spd);
12666         PIPE_CONF_CHECK_INFOFRAME(hdmi);
12667         PIPE_CONF_CHECK_INFOFRAME(drm);
12668
12669 #undef PIPE_CONF_CHECK_X
12670 #undef PIPE_CONF_CHECK_I
12671 #undef PIPE_CONF_CHECK_BOOL
12672 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
12673 #undef PIPE_CONF_CHECK_P
12674 #undef PIPE_CONF_CHECK_FLAGS
12675 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
12676 #undef PIPE_CONF_QUIRK
12677
12678         return ret;
12679 }
12680
12681 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
12682                                            const struct intel_crtc_state *pipe_config)
12683 {
12684         if (pipe_config->has_pch_encoder) {
12685                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
12686                                                             &pipe_config->fdi_m_n);
12687                 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
12688
12689                 /*
12690                  * FDI already provided one idea for the dotclock.
12691                  * Yell if the encoder disagrees.
12692                  */
12693                 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
12694                      "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12695                      fdi_dotclock, dotclock);
12696         }
12697 }
12698
/*
 * Read back the SKL+ watermark and DDB state from the hardware and
 * compare it against the sw-computed state in @new_crtc_state, logging
 * DRM_ERRORs for any mismatch.  No-op on pre-gen9 or inactive pipes.
 */
static void verify_wm_state(struct intel_crtc *crtc,
			    struct intel_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	/* hw readback snapshot; heap-allocated since it is fairly large */
	struct skl_hw_state {
		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
		struct skl_ddb_allocation ddb;
		struct skl_pipe_wm wm;
	} *hw;
	struct skl_ddb_allocation *sw_ddb;
	struct skl_pipe_wm *sw_wm;
	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
	const enum pipe pipe = crtc->pipe;
	int plane, level, max_level = ilk_wm_max_level(dev_priv);

	if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->base.active)
		return;

	/* best-effort verification: silently skip if the alloc fails */
	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return;

	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
	sw_wm = &new_crtc_state->wm.skl.optimal;

	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);

	skl_ddb_get_hw_state(dev_priv, &hw->ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	/* DBUF slice count is only tracked on gen11+ */
	if (INTEL_GEN(dev_priv) >= 11 &&
	    hw->ddb.enabled_slices != sw_ddb->enabled_slices)
		DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
			  sw_ddb->enabled_slices,
			  hw->ddb.enabled_slices);

	/* planes */
	for_each_universal_plane(dev_priv, pipe, plane) {
		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

		hw_plane_wm = &hw->wm.planes[plane];
		sw_plane_wm = &sw_wm->planes[plane];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1, level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		/* transition watermark */
		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1,
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb_y[plane];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	/*
	 * cursor
	 * If the cursor plane isn't active, we may not have updated it's ddb
	 * allocation. In that case since the ddb allocation will be updated
	 * once the plane becomes visible, we can skip this check
	 */
	/*
	 * NOTE(review): the condition below is always true, so the cursor is
	 * in fact always checked despite the comment above; the "if (1)"
	 * only scopes the local variables — confirm whether a real
	 * visibility check was intended here.
	 */
	if (1) {
		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;

		hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe),
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe),
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	kfree(hw);
}
12837
/*
 * Verify every connector in @state whose new state points at @crtc:
 * run the connector's own state check and warn if the atomic
 * best_encoder disagrees with the legacy connector->encoder pointer.
 */
static void
verify_connector_state(struct intel_atomic_state *state,
		       struct intel_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
		struct drm_encoder *encoder = connector->encoder;
		struct intel_crtc_state *crtc_state = NULL;

		if (new_conn_state->crtc != &crtc->base)
			continue;

		/*
		 * NOTE(review): the guard suggests @crtc may be NULL (then
		 * the filter above selects connectors with no crtc and
		 * crtc_state stays NULL) — confirm against the callers.
		 */
		if (crtc)
			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		intel_connector_verify_state(crtc_state, new_conn_state);

		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
		     "connector's atomic encoder doesn't match legacy encoder\n");
	}
}
12862
/*
 * For every encoder, cross-check its sw enabled state against the
 * connectors in @state and against the hw: an encoder is "enabled" iff
 * some connector's new state uses it, which must match encoder->base.crtc;
 * a detached encoder must also read back as disabled from the hw.
 */
static void
verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		/* found: encoder referenced by any old or new connector state;
		 * enabled: referenced by some new connector state */
		bool enabled = false, found = false;
		enum pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
						   new_conn_state, i) {
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			found = enabled = true;

			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		/* encoder untouched by this commit — nothing to verify */
		if (!found)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			/* detached encoders must be off in hw as well */
			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
12911
static void
verify_crtc_state(struct intel_crtc *crtc,
		  struct intel_crtc_state *old_crtc_state,
		  struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config;
	struct drm_atomic_state *state;
	bool active;

	/*
	 * Reuse the old crtc state's memory as scratch space for the hw
	 * state readout: free its contents, zero it, and keep only the
	 * crtc and atomic-state back-pointers.
	 */
	state = old_crtc_state->base.state;
	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->base);
	pipe_config = old_crtc_state;
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->base.crtc = &crtc->base;
	pipe_config->base.state = state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);

	/* Read back the pipe's hw state into the scratch config. */
	active = dev_priv->display.get_pipe_config(crtc, pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		active = new_crtc_state->base.active;

	I915_STATE_WARN(new_crtc_state->base.active != active,
	     "crtc active state doesn't match with hw state "
	     "(expected %i, found %i)\n", new_crtc_state->base.active, active);

	I915_STATE_WARN(crtc->active != new_crtc_state->base.active,
	     "transitional active state does not match atomic hw state "
	     "(expected %i, found %i)\n", new_crtc_state->base.active, crtc->active);

	/* Each encoder's hw state must agree with the crtc's sw state. */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->base.active,
			"[ENCODER:%i] active %i with crtc active %i\n",
			encoder->base.base.id, active, new_crtc_state->base.active);

		I915_STATE_WARN(active && crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Let each active encoder fill in its part of the hw state. */
		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	/* An inactive crtc has no meaningful hw state left to compare. */
	if (!new_crtc_state->base.active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	/* Finally, compare the read-back hw state against the sw state. */
	if (!intel_pipe_config_compare(new_crtc_state,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
	}
}
12977
12978 static void
12979 intel_verify_planes(struct intel_atomic_state *state)
12980 {
12981         struct intel_plane *plane;
12982         const struct intel_plane_state *plane_state;
12983         int i;
12984
12985         for_each_new_intel_plane_in_state(state, plane,
12986                                           plane_state, i)
12987                 assert_plane(plane, plane_state->slave ||
12988                              plane_state->base.visible);
12989 }
12990
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct intel_crtc *crtc,
			 struct intel_crtc_state *new_crtc_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned int crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->info->name);

	/* Read back the current hw state of this DPLL. */
	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on PLLs are exempt from the on/off bookkeeping checks. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	/* Without a crtc, only the global reference accounting can be checked. */
	if (!crtc) {
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = drm_crtc_mask(&crtc->base);

	/*
	 * The crtc must be in the pll's active mask exactly when the crtc
	 * itself is active.
	 * NOTE(review): pipe_name(drm_crtc_index(...)) assumes the crtc
	 * index matches the pipe enum — confirm that holds for all
	 * platforms reaching this path.
	 */
	if (new_crtc_state->base.active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);

	/* The crtc must hold a reference on the pll it uses. */
	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	/* The sw-tracked hw state must match what was just read back. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
13045
13046 static void
13047 verify_shared_dpll_state(struct intel_crtc *crtc,
13048                          struct intel_crtc_state *old_crtc_state,
13049                          struct intel_crtc_state *new_crtc_state)
13050 {
13051         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13052
13053         if (new_crtc_state->shared_dpll)
13054                 verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
13055
13056         if (old_crtc_state->shared_dpll &&
13057             old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
13058                 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
13059                 struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
13060
13061                 I915_STATE_WARN(pll->active_mask & crtc_mask,
13062                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
13063                                 pipe_name(drm_crtc_index(&crtc->base)));
13064                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
13065                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
13066                                 pipe_name(drm_crtc_index(&crtc->base)));
13067         }
13068 }
13069
13070 static void
13071 intel_modeset_verify_crtc(struct intel_crtc *crtc,
13072                           struct intel_atomic_state *state,
13073                           struct intel_crtc_state *old_crtc_state,
13074                           struct intel_crtc_state *new_crtc_state)
13075 {
13076         if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
13077                 return;
13078
13079         verify_wm_state(crtc, new_crtc_state);
13080         verify_connector_state(state, crtc);
13081         verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
13082         verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
13083 }
13084
13085 static void
13086 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
13087 {
13088         int i;
13089
13090         for (i = 0; i < dev_priv->num_shared_dpll; i++)
13091                 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
13092 }
13093
/*
 * Cross-check the parts of the hw/sw state not tied to a specific crtc:
 * encoder and connector state, plus all shared DPLLs.
 */
static void
intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
			      struct intel_atomic_state *state)
{
	verify_encoder_state(dev_priv, state);
	verify_connector_state(state, NULL);
	verify_disabled_dpll_state(dev_priv);
}
13102
13103 static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
13104 {
13105         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
13106         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13107
13108         /*
13109          * The scanline counter increments at the leading edge of hsync.
13110          *
13111          * On most platforms it starts counting from vtotal-1 on the
13112          * first active line. That means the scanline counter value is
13113          * always one less than what we would expect. Ie. just after
13114          * start of vblank, which also occurs at start of hsync (on the
13115          * last active line), the scanline counter will read vblank_start-1.
13116          *
13117          * On gen2 the scanline counter starts counting from 1 instead
13118          * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13119          * to keep the value positive), instead of adding one.
13120          *
13121          * On HSW+ the behaviour of the scanline counter depends on the output
13122          * type. For DP ports it behaves like most other platforms, but on HDMI
13123          * there's an extra 1 line difference. So we need to add two instead of
13124          * one to the value.
13125          *
13126          * On VLV/CHV DSI the scanline counter would appear to increment
13127          * approx. 1/3 of a scanline before start of vblank. Unfortunately
13128          * that means we can't tell whether we're in vblank or not while
13129          * we're on that particular line. We must still set scanline_offset
13130          * to 1 so that the vblank timestamps come out correct when we query
13131          * the scanline counter from within the vblank interrupt handler.
13132          * However if queried just before the start of vblank we'll get an
13133          * answer that's slightly in the future.
13134          */
13135         if (IS_GEN(dev_priv, 2)) {
13136                 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
13137                 int vtotal;
13138
13139                 vtotal = adjusted_mode->crtc_vtotal;
13140                 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13141                         vtotal /= 2;
13142
13143                 crtc->scanline_offset = vtotal - 1;
13144         } else if (HAS_DDI(dev_priv) &&
13145                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
13146                 crtc->scanline_offset = 2;
13147         } else
13148                 crtc->scanline_offset = 1;
13149 }
13150
13151 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
13152 {
13153         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13154         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13155         struct intel_crtc *crtc;
13156         int i;
13157
13158         if (!dev_priv->display.crtc_compute_clock)
13159                 return;
13160
13161         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13162                                             new_crtc_state, i) {
13163                 struct intel_shared_dpll *old_dpll =
13164                         old_crtc_state->shared_dpll;
13165
13166                 if (!needs_modeset(new_crtc_state))
13167                         continue;
13168
13169                 new_crtc_state->shared_dpll = NULL;
13170
13171                 if (!old_dpll)
13172                         continue;
13173
13174                 intel_release_shared_dpll(old_dpll, crtc, &state->base);
13175         }
13176 }
13177
13178 /*
13179  * This implements the workaround described in the "notes" section of the mode
13180  * set sequence documentation. When going from no pipes or single pipe to
13181  * multiple pipes, and planes are enabled after the pipe, we need to wait at
13182  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13183  */
static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
{
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled during the modeset */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->base.active ||
		    !needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			/* Found a second enabled modeset crtc; that's enough. */
			other_crtc_state = crtc_state;
			break;
		} else {
			first_crtc_state = crtc_state;
			first_pipe = crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->base.dev, crtc) {
		/* This takes the crtc lock, so it can fail (e.g. -EDEADLK). */
		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->hsw_workaround_pipe = INVALID_PIPE;

		/* Only count crtcs that stay enabled without a modeset. */
		if (!crtc_state->base.active ||
		    needs_modeset(crtc_state))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = crtc->pipe;
	}

	/*
	 * Exactly one pipe stays enabled: make the first modeset pipe wait
	 * on it. Otherwise, if a second pipe is being enabled, make it
	 * wait on the first.
	 */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
13238
13239 static int intel_lock_all_pipes(struct drm_atomic_state *state)
13240 {
13241         struct drm_crtc *crtc;
13242
13243         /* Add all pipes to the state */
13244         for_each_crtc(state->dev, crtc) {
13245                 struct drm_crtc_state *crtc_state;
13246
13247                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13248                 if (IS_ERR(crtc_state))
13249                         return PTR_ERR(crtc_state);
13250         }
13251
13252         return 0;
13253 }
13254
13255 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13256 {
13257         struct drm_crtc *crtc;
13258
13259         /*
13260          * Add all pipes to the state, and force
13261          * a modeset on all the active ones.
13262          */
13263         for_each_crtc(state->dev, crtc) {
13264                 struct drm_crtc_state *crtc_state;
13265                 int ret;
13266
13267                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13268                 if (IS_ERR(crtc_state))
13269                         return PTR_ERR(crtc_state);
13270
13271                 if (!crtc_state->active || needs_modeset(to_intel_crtc_state(crtc_state)))
13272                         continue;
13273
13274                 crtc_state->mode_changed = true;
13275
13276                 ret = drm_atomic_add_affected_connectors(state, crtc);
13277                 if (ret)
13278                         return ret;
13279
13280                 ret = drm_atomic_add_affected_planes(state, crtc);
13281                 if (ret)
13282                         return ret;
13283         }
13284
13285         return 0;
13286 }
13287
static int intel_modeset_checks(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int ret = 0, i;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	/* keep the current setting */
	if (!state->cdclk.force_min_cdclk_changed)
		state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;

	/* Seed the state-wide bookkeeping from the current device state. */
	state->modeset = true;
	state->active_crtcs = dev_priv->active_crtcs;
	state->cdclk.logical = dev_priv->cdclk.logical;
	state->cdclk.actual = dev_priv->cdclk.actual;
	state->cdclk.pipe = INVALID_PIPE;

	/* Recompute the active crtc mask and note which pipes change state. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (new_crtc_state->base.active)
			state->active_crtcs |= 1 << i;
		else
			state->active_crtcs &= ~(1 << i);

		if (old_crtc_state->base.active != new_crtc_state->base.active)
			state->active_pipe_changes |= drm_crtc_mask(&crtc->base);
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off.  We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc.  For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		enum pipe pipe;

		ret = dev_priv->display.modeset_calc_cdclk(state);
		if (ret < 0)
			return ret;

		/*
		 * Writes to dev_priv->cdclk.logical must be protected by
		 * holding all the crtc locks, even if we don't end up
		 * touching the hardware
		 */
		if (intel_cdclk_changed(&dev_priv->cdclk.logical,
					&state->cdclk.logical)) {
			ret = intel_lock_all_pipes(&state->base);
			if (ret < 0)
				return ret;
		}

		/*
		 * A cd2x-only update is considered only with a single
		 * active crtc (one bit set) that isn't itself doing a
		 * modeset.
		 */
		if (is_power_of_2(state->active_crtcs)) {
			struct intel_crtc *crtc;
			struct intel_crtc_state *crtc_state;

			pipe = ilog2(state->active_crtcs);
			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
			if (crtc_state && needs_modeset(crtc_state))
				pipe = INVALID_PIPE;
		} else {
			pipe = INVALID_PIPE;
		}

		/* All pipes must be switched off while we change the cdclk. */
		if (pipe != INVALID_PIPE &&
		    intel_cdclk_needs_cd2x_update(dev_priv,
						  &dev_priv->cdclk.actual,
						  &state->cdclk.actual)) {
			ret = intel_lock_all_pipes(&state->base);
			if (ret < 0)
				return ret;

			state->cdclk.pipe = pipe;
		} else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
						     &state->cdclk.actual)) {
			/* Full cdclk change: force a modeset on all pipes. */
			ret = intel_modeset_all_pipes(&state->base);
			if (ret < 0)
				return ret;

			state->cdclk.pipe = INVALID_PIPE;
		}

		DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
			      state->cdclk.logical.cdclk,
			      state->cdclk.actual.cdclk);
		DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
			      state->cdclk.logical.voltage_level,
			      state->cdclk.actual.voltage_level);
	}

	/* Release DPLLs of crtcs undergoing a modeset. */
	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
13394
13395 /*
13396  * Handle calculation of various watermark data at the end of the atomic check
13397  * phase.  The code here should be run after the per-crtc and per-plane 'check'
13398  * handlers to ensure that all derived state has been updated.
13399  */
13400 static int calc_watermark_data(struct intel_atomic_state *state)
13401 {
13402         struct drm_device *dev = state->base.dev;
13403         struct drm_i915_private *dev_priv = to_i915(dev);
13404
13405         /* Is there platform-specific watermark information to calculate? */
13406         if (dev_priv->display.compute_global_watermarks)
13407                 return dev_priv->display.compute_global_watermarks(state);
13408
13409         return 0;
13410 }
13411
13412 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
13413                                      struct intel_crtc_state *new_crtc_state)
13414 {
13415         if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
13416                 return;
13417
13418         new_crtc_state->base.mode_changed = false;
13419         new_crtc_state->update_pipe = true;
13420
13421         /*
13422          * If we're not doing the full modeset we want to
13423          * keep the current M/N values as they may be
13424          * sufficiently different to the computed values
13425          * to cause problems.
13426          *
13427          * FIXME: should really copy more fuzzy state here
13428          */
13429         new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
13430         new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
13431         new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
13432         new_crtc_state->has_drrs = old_crtc_state->has_drrs;
13433 }
13434
13435 /**
13436  * intel_atomic_check - validate state object
13437  * @dev: drm device
13438  * @_state: state to validate
13439  */
static int intel_atomic_check(struct drm_device *dev,
			      struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;
	/* A forced min-cdclk change counts as a modeset from the start. */
	bool any_ms = state->cdclk.force_min_cdclk_changed;

	/* Catch I915_MODE_FLAG_INHERITED */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (new_crtc_state->base.mode.private_flags !=
		    old_crtc_state->base.mode.private_flags)
			new_crtc_state->base.mode_changed = true;
	}

	ret = drm_atomic_helper_check_modeset(dev, &state->base);
	if (ret)
		goto fail;

	/* Compute a full pipe config for every crtc needing a modeset. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state))
			continue;

		/* Crtcs being disabled need no config, but still count. */
		if (!new_crtc_state->base.enable) {
			any_ms = true;
			continue;
		}

		ret = intel_modeset_pipe_config(new_crtc_state);
		if (ret)
			goto fail;

		/* Possibly downgrade this modeset to a fastset. */
		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);

		if (needs_modeset(new_crtc_state))
			any_ms = true;
	}

	ret = drm_dp_mst_atomic_check(&state->base);
	if (ret)
		goto fail;

	if (any_ms) {
		ret = intel_modeset_checks(state);
		if (ret)
			goto fail;
	} else {
		/* No modeset: keep using the current logical cdclk. */
		state->cdclk.logical = dev_priv->cdclk.logical;
	}

	ret = icl_add_linked_planes(state);
	if (ret)
		goto fail;

	ret = drm_atomic_helper_check_planes(dev, &state->base);
	if (ret)
		goto fail;

	intel_fbc_choose_crtc(dev_priv, state);
	ret = calc_watermark_data(state);
	if (ret)
		goto fail;

	ret = intel_bw_atomic_check(state);
	if (ret)
		goto fail;

	/* Dump the final config of every crtc about to be touched. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (!needs_modeset(new_crtc_state) &&
		    !new_crtc_state->update_pipe)
			continue;

		intel_dump_pipe_config(new_crtc_state, state,
				       needs_modeset(new_crtc_state) ?
				       "[modeset]" : "[fastset]");
	}

	return 0;

 fail:
	/* -EDEADLK means the caller will back off and retry; don't dump. */
	if (ret == -EDEADLK)
		return ret;

	/*
	 * FIXME would probably be nice to know which crtc specifically
	 * caused the failure, in cases where we can pinpoint it.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		intel_dump_pipe_config(new_crtc_state, state, "[failed]");

	return ret;
}
13538
/* Prepare (e.g. pin) all plane framebuffers in @state ahead of the commit. */
static int intel_atomic_prepare_commit(struct drm_device *dev,
				       struct drm_atomic_state *state)
{
	return drm_atomic_helper_prepare_planes(dev, state);
}
13544
13545 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
13546 {
13547         struct drm_device *dev = crtc->base.dev;
13548         struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
13549
13550         if (!vblank->max_vblank_count)
13551                 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
13552
13553         return crtc->base.funcs->get_vblank_counter(&crtc->base);
13554 }
13555
/* Commit one crtc's new state to the hardware: enable or fastset, then planes. */
static void intel_update_crtc(struct intel_crtc *crtc,
			      struct intel_atomic_state *state,
			      struct intel_crtc_state *old_crtc_state,
			      struct intel_crtc_state *new_crtc_state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool modeset = needs_modeset(new_crtc_state);
	struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(state,
						 to_intel_plane(crtc->base.primary));

	if (modeset) {
		/* Full modeset: set the scanline offset, then enable the crtc. */
		update_scanline_offset(new_crtc_state);
		dev_priv->display.crtc_enable(new_crtc_state, state);

		/* vblanks work again, re-enable pipe CRC. */
		intel_crtc_enable_pipe_crc(crtc);
	} else {
		/* Fastset path: prepare the update without a full enable. */
		intel_pre_plane_update(old_crtc_state, new_crtc_state);

		if (new_crtc_state->update_pipe)
			intel_encoders_update_pipe(crtc, new_crtc_state, state);
	}

	/* Update FBC: disable when no longer usable, otherwise (re)enable. */
	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
		intel_fbc_disable(crtc);
	else if (new_plane_state)
		intel_fbc_enable(crtc, new_crtc_state, new_plane_state);

	intel_begin_crtc_commit(state, crtc);

	/* Gen9+ uses the skl plane update path, older gens the i9xx one. */
	if (INTEL_GEN(dev_priv) >= 9)
		skl_update_planes_on_crtc(state, crtc);
	else
		i9xx_update_planes_on_crtc(state, crtc);

	intel_finish_crtc_commit(state, crtc);
}
13595
13596 static void intel_update_crtcs(struct intel_atomic_state *state)
13597 {
13598         struct intel_crtc *crtc;
13599         struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13600         int i;
13601
13602         for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13603                 if (!new_crtc_state->base.active)
13604                         continue;
13605
13606                 intel_update_crtc(crtc, state, old_crtc_state,
13607                                   new_crtc_state);
13608         }
13609 }
13610
/*
 * skl_update_crtcs - flush plane/pipe updates on SKL+ in a DDB-safe order
 *
 * DDB (display data buffer) allocations may be rearranged by this commit,
 * and two pipes must never use overlapping DDB ranges at the same time.
 * Pipes are therefore flushed iteratively: on each pass only pipes whose
 * new allocation does not overlap any currently-programmed allocation are
 * updated, until every active pipe has been committed.  The second DBuf
 * slice (gen11+) is enabled before / disabled after the updates as needed.
 */
static void skl_update_crtcs(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned int updated = 0;	/* mask of crtcs already flushed */
	bool progress;
	enum pipe pipe;
	int i;
	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
	u8 required_slices = state->wm_results.ddb.enabled_slices;
	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};

	/* Seed the table with the currently-programmed (old) allocations. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
		/* ignore allocations for crtc's that have been turned off. */
		if (new_crtc_state->base.active)
			entries[i] = old_crtc_state->wm.skl.ddb;

	/* If 2nd DBuf slice required, enable it here */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with eachother inbetween CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 */
	do {
		progress = false;

		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			bool vbl_wait = false;
			unsigned int cmask = drm_crtc_mask(&crtc->base);

			pipe = crtc->pipe;

			/* Skip pipes already flushed or not active. */
			if (updated & cmask || !new_crtc_state->base.active)
				continue;

			/*
			 * Defer this pipe while its new allocation still
			 * overlaps another pipe's current allocation; a
			 * later pass will pick it up once space is free.
			 */
			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
							entries,
							INTEL_INFO(dev_priv)->num_pipes, i))
				continue;

			updated |= cmask;
			entries[i] = new_crtc_state->wm.skl.ddb;

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
						 &old_crtc_state->wm.skl.ddb) &&
			    !new_crtc_state->base.active_changed &&
			    state->wm_results.dirty_pipes != updated)
				vbl_wait = true;

			intel_update_crtc(crtc, state, old_crtc_state,
					  new_crtc_state);

			if (vbl_wait)
				intel_wait_for_vblank(dev_priv, pipe);

			progress = true;
		}
	} while (progress);

	/* If 2nd DBuf slice is no more required disable it */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);
}
13685
13686 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
13687 {
13688         struct intel_atomic_state *state, *next;
13689         struct llist_node *freed;
13690
13691         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
13692         llist_for_each_entry_safe(state, next, freed, freed)
13693                 drm_atomic_state_put(&state->base);
13694 }
13695
13696 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
13697 {
13698         struct drm_i915_private *dev_priv =
13699                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
13700
13701         intel_atomic_helper_free_state(dev_priv);
13702 }
13703
/*
 * Sleep until either the commit's i915_sw_fence has signalled or a GPU
 * reset requiring a modeset (I915_RESET_MODESET) has been flagged,
 * whichever happens first.  The reset bit must also wake us, since the
 * fence may never signal while a reset is pending.
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/*
		 * Queue ourselves on both waitqueues *before* testing the
		 * conditions, so a wakeup arriving between the test and
		 * schedule() cannot be lost.
		 */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(&dev_priv->gpu_error.wait_queue,
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready)
		    || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
}
13727
13728 static void intel_atomic_cleanup_work(struct work_struct *work)
13729 {
13730         struct drm_atomic_state *state =
13731                 container_of(work, struct drm_atomic_state, commit_work);
13732         struct drm_i915_private *i915 = to_i915(state->dev);
13733
13734         drm_atomic_helper_cleanup_planes(&i915->drm, state);
13735         drm_atomic_helper_commit_cleanup_done(state);
13736         drm_atomic_state_put(state);
13737
13738         intel_atomic_helper_free_state(i915);
13739 }
13740
/*
 * intel_atomic_commit_tail - program the hardware for an already-swapped commit
 * @state: atomic state whose software structures have been swapped in
 *
 * Runs after all fences have signalled (possibly from a worker for
 * nonblocking commits).  The sequence is strictly ordered: disable
 * outgoing pipes, update legacy state/cdclk/SAGV, enable the new
 * configuration, wait for flips, then do post-update work and defer the
 * final cleanup to a highpri worker.
 */
static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
	struct drm_device *dev = state->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

	/* Block until all dependencies (fences, GPU reset) have resolved. */
	intel_atomic_commit_fence_wait(state);

	drm_atomic_helper_wait_for_dependencies(&state->base);

	/* Keep the display powered across the whole modeset sequence. */
	if (state->modeset)
		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	/*
	 * First pass: grab the power domains each changing crtc needs, and
	 * disable every pipe undergoing a full modeset.
	 */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (needs_modeset(new_crtc_state) ||
		    new_crtc_state->update_pipe) {

			put_domains[crtc->pipe] =
				modeset_get_crtc_power_domains(crtc,
					new_crtc_state);
		}

		if (!needs_modeset(new_crtc_state))
			continue;

		intel_pre_plane_update(old_crtc_state, new_crtc_state);

		if (old_crtc_state->base.active) {
			intel_crtc_disable_planes(state, crtc);

			/*
			 * We need to disable pipe CRC before disabling the pipe,
			 * or we race against vblank off.
			 */
			intel_crtc_disable_pipe_crc(crtc);

			dev_priv->display.crtc_disable(old_crtc_state, state);
			crtc->active = false;
			intel_fbc_disable(crtc);
			intel_disable_shared_dpll(old_crtc_state);

			/*
			 * Underruns don't always raise
			 * interrupts, so check manually.
			 */
			intel_check_cpu_fifo_underruns(dev_priv);
			intel_check_pch_fifo_underruns(dev_priv);

			/* FIXME unify this for all platforms */
			if (!new_crtc_state->base.active &&
			    !HAS_GMCH(dev_priv) &&
			    dev_priv->display.initial_watermarks)
				dev_priv->display.initial_watermarks(state,
								     new_crtc_state);
		}
	}

	/* FIXME: Eventually get rid of our crtc->config pointer */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		crtc->config = new_crtc_state;

	if (state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);

		/* Raise cdclk if needed before enabling anything. */
		intel_set_cdclk_pre_plane_update(dev_priv,
						 &state->cdclk.actual,
						 &dev_priv->cdclk.actual,
						 state->cdclk.pipe);

		/*
		 * SKL workaround: bspec recommends we disable the SAGV when we
		 * have more then one pipe enabled
		 */
		if (!intel_can_enable_sagv(state))
			intel_disable_sagv(dev_priv);

		intel_modeset_verify_disabled(dev_priv, state);
	}

	/* Complete the events for pipes that have now been disabled */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = needs_modeset(new_crtc_state);

		/* Complete events for now disable pipes here. */
		if (modeset && !new_crtc_state->base.active && new_crtc_state->base.event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(&crtc->base, new_crtc_state->base.event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->base.event = NULL;
		}
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.update_crtcs(state);

	/* Lower cdclk only after the pipes are running at the new config. */
	if (state->modeset)
		intel_set_cdclk_post_plane_update(dev_priv,
						  &state->cdclk.actual,
						  &dev_priv->cdclk.actual,
						  state->cdclk.pipe);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchrously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need out special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, &state->base);

	/* Load LUTs for fastsets/color changes once the flip has completed. */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->base.active &&
		    !needs_modeset(new_crtc_state) &&
		    (new_crtc_state->base.color_mgmt_changed ||
		     new_crtc_state->update_pipe))
			intel_color_load_luts(new_crtc_state);
	}

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(state,
							      new_crtc_state);
	}

	/* Post-update work and release of the per-crtc power domains. */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(old_crtc_state);

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
	}

	if (state->modeset)
		intel_verify_planes(state);

	/* Re-enable SAGV only after the new pipe configuration is live. */
	if (state->modeset && intel_can_enable_sagv(state))
		intel_enable_sagv(dev_priv);

	drm_atomic_helper_commit_hw_done(&state->base);

	if (state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
	queue_work(system_highpri_wq, &state->base.commit_work);
}
13919
13920 static void intel_atomic_commit_work(struct work_struct *work)
13921 {
13922         struct intel_atomic_state *state =
13923                 container_of(work, struct intel_atomic_state, base.commit_work);
13924
13925         intel_atomic_commit_tail(state);
13926 }
13927
13928 static int __i915_sw_fence_call
13929 intel_atomic_commit_ready(struct i915_sw_fence *fence,
13930                           enum i915_sw_fence_notify notify)
13931 {
13932         struct intel_atomic_state *state =
13933                 container_of(fence, struct intel_atomic_state, commit_ready);
13934
13935         switch (notify) {
13936         case FENCE_COMPLETE:
13937                 /* we do blocking waits in the worker, nothing to do here */
13938                 break;
13939         case FENCE_FREE:
13940                 {
13941                         struct intel_atomic_helper *helper =
13942                                 &to_i915(state->base.dev)->atomic_helper;
13943
13944                         if (llist_add(&state->freed, &helper->free_list))
13945                                 schedule_work(&helper->free_work);
13946                         break;
13947                 }
13948         }
13949
13950         return NOTIFY_DONE;
13951 }
13952
13953 static void intel_atomic_track_fbs(struct drm_atomic_state *state)
13954 {
13955         struct drm_plane_state *old_plane_state, *new_plane_state;
13956         struct drm_plane *plane;
13957         int i;
13958
13959         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
13960                 i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
13961                                   intel_fb_obj(new_plane_state->fb),
13962                                   to_intel_plane(plane)->frontbuffer_bit);
13963 }
13964
13965 /**
13966  * intel_atomic_commit - commit validated state object
13967  * @dev: DRM device
13968  * @state: the top-level driver state object
13969  * @nonblock: nonblocking commit
13970  *
13971  * This function commits a top-level state object that has been validated
13972  * with drm_atomic_helper_check().
13973  *
13974  * RETURNS
13975  * Zero for success or -errno.
13976  */
13977 static int intel_atomic_commit(struct drm_device *dev,
13978                                struct drm_atomic_state *state,
13979                                bool nonblock)
13980 {
13981         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13982         struct drm_i915_private *dev_priv = to_i915(dev);
13983         int ret = 0;
13984
13985         intel_state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
13986
13987         drm_atomic_state_get(state);
13988         i915_sw_fence_init(&intel_state->commit_ready,
13989                            intel_atomic_commit_ready);
13990
13991         /*
13992          * The intel_legacy_cursor_update() fast path takes care
13993          * of avoiding the vblank waits for simple cursor
13994          * movement and flips. For cursor on/off and size changes,
13995          * we want to perform the vblank waits so that watermark
13996          * updates happen during the correct frames. Gen9+ have
13997          * double buffered watermarks and so shouldn't need this.
13998          *
13999          * Unset state->legacy_cursor_update before the call to
14000          * drm_atomic_helper_setup_commit() because otherwise
14001          * drm_atomic_helper_wait_for_flip_done() is a noop and
14002          * we get FIFO underruns because we didn't wait
14003          * for vblank.
14004          *
14005          * FIXME doing watermarks and fb cleanup from a vblank worker
14006          * (assuming we had any) would solve these problems.
14007          */
14008         if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
14009                 struct intel_crtc_state *new_crtc_state;
14010                 struct intel_crtc *crtc;
14011                 int i;
14012
14013                 for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
14014                         if (new_crtc_state->wm.need_postvbl_update ||
14015                             new_crtc_state->update_wm_post)
14016                                 state->legacy_cursor_update = false;
14017         }
14018
14019         ret = intel_atomic_prepare_commit(dev, state);
14020         if (ret) {
14021                 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
14022                 i915_sw_fence_commit(&intel_state->commit_ready);
14023                 intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
14024                 return ret;
14025         }
14026
14027         ret = drm_atomic_helper_setup_commit(state, nonblock);
14028         if (!ret)
14029                 ret = drm_atomic_helper_swap_state(state, true);
14030
14031         if (ret) {
14032                 i915_sw_fence_commit(&intel_state->commit_ready);
14033
14034                 drm_atomic_helper_cleanup_planes(dev, state);
14035                 intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
14036                 return ret;
14037         }
14038         dev_priv->wm.distrust_bios_wm = false;
14039         intel_shared_dpll_swap_state(state);
14040         intel_atomic_track_fbs(state);
14041
14042         if (intel_state->modeset) {
14043                 memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
14044                        sizeof(intel_state->min_cdclk));
14045                 memcpy(dev_priv->min_voltage_level,
14046                        intel_state->min_voltage_level,
14047                        sizeof(intel_state->min_voltage_level));
14048                 dev_priv->active_crtcs = intel_state->active_crtcs;
14049                 dev_priv->cdclk.force_min_cdclk =
14050                         intel_state->cdclk.force_min_cdclk;
14051
14052                 intel_cdclk_swap_state(intel_state);
14053         }
14054
14055         drm_atomic_state_get(state);
14056         INIT_WORK(&state->commit_work, intel_atomic_commit_work);
14057
14058         i915_sw_fence_commit(&intel_state->commit_ready);
14059         if (nonblock && intel_state->modeset) {
14060                 queue_work(dev_priv->modeset_wq, &state->commit_work);
14061         } else if (nonblock) {
14062                 queue_work(system_unbound_wq, &state->commit_work);
14063         } else {
14064                 if (intel_state->modeset)
14065                         flush_workqueue(dev_priv->modeset_wq);
14066                 intel_atomic_commit_tail(intel_state);
14067         }
14068
14069         return 0;
14070 }
14071
/*
 * Waitqueue entry queued on a crtc's vblank waitqueue by
 * add_rps_boost_after_vblank().  do_rps_boost() fires at the vblank and
 * boosts the GPU frequency if @request has not started executing yet.
 */
struct wait_rps_boost {
	struct wait_queue_entry wait;	/* embeds do_rps_boost() as callback */

	struct drm_crtc *crtc;		/* crtc whose vblank reference we hold */
	struct i915_request *request;	/* request to boost; owns a reference */
};
14078
14079 static int do_rps_boost(struct wait_queue_entry *_wait,
14080                         unsigned mode, int sync, void *key)
14081 {
14082         struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
14083         struct i915_request *rq = wait->request;
14084
14085         /*
14086          * If we missed the vblank, but the request is already running it
14087          * is reasonable to assume that it will complete before the next
14088          * vblank without our intervention, so leave RPS alone.
14089          */
14090         if (!i915_request_started(rq))
14091                 gen6_rps_boost(rq);
14092         i915_request_put(rq);
14093
14094         drm_crtc_vblank_put(wait->crtc);
14095
14096         list_del(&wait->wait.entry);
14097         kfree(wait);
14098         return 1;
14099 }
14100
14101 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
14102                                        struct dma_fence *fence)
14103 {
14104         struct wait_rps_boost *wait;
14105
14106         if (!dma_fence_is_i915(fence))
14107                 return;
14108
14109         if (INTEL_GEN(to_i915(crtc->dev)) < 6)
14110                 return;
14111
14112         if (drm_crtc_vblank_get(crtc))
14113                 return;
14114
14115         wait = kmalloc(sizeof(*wait), GFP_KERNEL);
14116         if (!wait) {
14117                 drm_crtc_vblank_put(crtc);
14118                 return;
14119         }
14120
14121         wait->request = to_request(dma_fence_get(fence));
14122         wait->crtc = crtc;
14123
14124         wait->wait.func = do_rps_boost;
14125         wait->wait.flags = 0;
14126
14127         add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
14128 }
14129
/*
 * Pin (and fence, where applicable) the framebuffer of @plane_state for
 * scanout.  On platforms whose cursor requires a physical address the
 * object is first attached to contiguous physical storage.  The resulting
 * vma is stored in plane_state->vma and released by intel_plane_unpin_fb().
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct i915_vma *vma;

	/* Physical attach must happen before the pin on these platforms. */
	if (plane->id == PLANE_CURSOR &&
	    INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
		struct drm_i915_gem_object *obj = intel_fb_obj(fb);
		const int align = intel_cursor_alignment(dev_priv);
		int err;

		err = i915_gem_object_attach_phys(obj, align);
		if (err)
			return err;
	}

	vma = intel_pin_and_fence_fb_obj(fb,
					 &plane_state->view,
					 intel_plane_uses_fence(plane_state),
					 &plane_state->flags);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	plane_state->vma = vma;

	return 0;
}
14159
14160 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
14161 {
14162         struct i915_vma *vma;
14163
14164         vma = fetch_and_zero(&old_plane_state->vma);
14165         if (vma)
14166                 intel_unpin_fb_vma(vma, old_plane_state->flags);
14167 }
14168
14169 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
14170 {
14171         struct i915_sched_attr attr = {
14172                 .priority = I915_PRIORITY_DISPLAY,
14173         };
14174
14175         i915_gem_object_wait_priority(obj, 0, &attr);
14176 }
14177
14178 /**
14179  * intel_prepare_plane_fb - Prepare fb for usage on plane
14180  * @plane: drm plane to prepare for
14181  * @new_state: the plane state being prepared
14182  *
14183  * Prepares a framebuffer for usage on a display plane.  Generally this
14184  * involves pinning the underlying object and updating the frontbuffer tracking
14185  * bits.  Some older platforms need special physical address handling for
14186  * cursor planes.
14187  *
14188  * Must be called with struct_mutex held.
14189  *
14190  * Returns 0 on success, negative error code on failure.
14191  */
14192 int
14193 intel_prepare_plane_fb(struct drm_plane *plane,
14194                        struct drm_plane_state *new_state)
14195 {
14196         struct intel_atomic_state *intel_state =
14197                 to_intel_atomic_state(new_state->state);
14198         struct drm_i915_private *dev_priv = to_i915(plane->dev);
14199         struct drm_framebuffer *fb = new_state->fb;
14200         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14201         struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
14202         int ret;
14203
14204         if (old_obj) {
14205                 struct intel_crtc_state *crtc_state =
14206                         intel_atomic_get_new_crtc_state(intel_state,
14207                                                         to_intel_crtc(plane->state->crtc));
14208
14209                 /* Big Hammer, we also need to ensure that any pending
14210                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
14211                  * current scanout is retired before unpinning the old
14212                  * framebuffer. Note that we rely on userspace rendering
14213                  * into the buffer attached to the pipe they are waiting
14214                  * on. If not, userspace generates a GPU hang with IPEHR
14215                  * point to the MI_WAIT_FOR_EVENT.
14216                  *
14217                  * This should only fail upon a hung GPU, in which case we
14218                  * can safely continue.
14219                  */
14220                 if (needs_modeset(crtc_state)) {
14221                         ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
14222                                                               old_obj->base.resv, NULL,
14223                                                               false, 0,
14224                                                               GFP_KERNEL);
14225                         if (ret < 0)
14226                                 return ret;
14227                 }
14228         }
14229
14230         if (new_state->fence) { /* explicit fencing */
14231                 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
14232                                                     new_state->fence,
14233                                                     I915_FENCE_TIMEOUT,
14234                                                     GFP_KERNEL);
14235                 if (ret < 0)
14236                         return ret;
14237         }
14238
14239         if (!obj)
14240                 return 0;
14241
14242         ret = i915_gem_object_pin_pages(obj);
14243         if (ret)
14244                 return ret;
14245
14246         ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
14247         if (ret) {
14248                 i915_gem_object_unpin_pages(obj);
14249                 return ret;
14250         }
14251
14252         ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
14253
14254         mutex_unlock(&dev_priv->drm.struct_mutex);
14255         i915_gem_object_unpin_pages(obj);
14256         if (ret)
14257                 return ret;
14258
14259         fb_obj_bump_render_priority(obj);
14260         intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
14261
14262         if (!new_state->fence) { /* implicit fencing */
14263                 struct dma_fence *fence;
14264
14265                 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
14266                                                       obj->base.resv, NULL,
14267                                                       false, I915_FENCE_TIMEOUT,
14268                                                       GFP_KERNEL);
14269                 if (ret < 0)
14270                         return ret;
14271
14272                 fence = reservation_object_get_excl_rcu(obj->base.resv);
14273                 if (fence) {
14274                         add_rps_boost_after_vblank(new_state->crtc, fence);
14275                         dma_fence_put(fence);
14276                 }
14277         } else {
14278                 add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
14279         }
14280
14281         /*
14282          * We declare pageflips to be interactive and so merit a small bias
14283          * towards upclocking to deliver the frame on time. By only changing
14284          * the RPS thresholds to sample more regularly and aim for higher
14285          * clocks we can hopefully deliver low power workloads (like kodi)
14286          * that are not quite steady state without resorting to forcing
14287          * maximum clocks following a vblank miss (see do_rps_boost()).
14288          */
14289         if (!intel_state->rps_interactive) {
14290                 intel_rps_mark_interactive(dev_priv, true);
14291                 intel_state->rps_interactive = true;
14292         }
14293
14294         return 0;
14295 }
14296
14297 /**
14298  * intel_cleanup_plane_fb - Cleans up an fb after plane use
14299  * @plane: drm plane to clean up for
14300  * @old_state: the state from the previous modeset
14301  *
14302  * Cleans up a framebuffer that has just been removed from a plane.
14303  *
14304  * Must be called with struct_mutex held.
14305  */
14306 void
14307 intel_cleanup_plane_fb(struct drm_plane *plane,
14308                        struct drm_plane_state *old_state)
14309 {
14310         struct intel_atomic_state *intel_state =
14311                 to_intel_atomic_state(old_state->state);
14312         struct drm_i915_private *dev_priv = to_i915(plane->dev);
14313
14314         if (intel_state->rps_interactive) {
14315                 intel_rps_mark_interactive(dev_priv, false);
14316                 intel_state->rps_interactive = false;
14317         }
14318
14319         /* Should only be called after a successful intel_prepare_plane_fb()! */
14320         mutex_lock(&dev_priv->drm.struct_mutex);
14321         intel_plane_unpin_fb(to_intel_plane_state(old_state));
14322         mutex_unlock(&dev_priv->drm.struct_mutex);
14323 }
14324
14325 int
14326 skl_max_scale(const struct intel_crtc_state *crtc_state,
14327               u32 pixel_format)
14328 {
14329         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
14330         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14331         int max_scale, mult;
14332         int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
14333
14334         if (!crtc_state->base.enable)
14335                 return DRM_PLANE_HELPER_NO_SCALING;
14336
14337         crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
14338         max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
14339
14340         if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
14341                 max_dotclk *= 2;
14342
14343         if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
14344                 return DRM_PLANE_HELPER_NO_SCALING;
14345
14346         /*
14347          * skl max scale is lower of:
14348          *    close to 3 but not 3, -1 is for that purpose
14349          *            or
14350          *    cdclk/crtc_clock
14351          */
14352         mult = is_planar_yuv_format(pixel_format) ? 2 : 3;
14353         tmpclk1 = (1 << 16) * mult - 1;
14354         tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
14355         max_scale = min(tmpclk1, tmpclk2);
14356
14357         return max_scale;
14358 }
14359
/*
 * First half of the per-crtc commit: opens the vblank evasion critical
 * section and programs the pipe-level state that must be in place before
 * the individual plane updates happen.
 */
static void intel_begin_crtc_commit(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	bool modeset = needs_modeset(new_crtc_state);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(new_crtc_state);

	/* On a full modeset only the watermark update below applies. */
	if (modeset)
		goto out;

	if (new_crtc_state->base.color_mgmt_changed ||
	    new_crtc_state->update_pipe)
		intel_color_commit(new_crtc_state);

	/* Fastset: refresh pipe config; otherwise release unused scalers. */
	if (new_crtc_state->update_pipe)
		intel_update_pipe_config(old_crtc_state, new_crtc_state);
	else if (INTEL_GEN(dev_priv) >= 9)
		skl_detach_scalers(new_crtc_state);

	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipemisc(new_crtc_state);

out:
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(state,
							   new_crtc_state);
}
14393
14394 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
14395                                   struct intel_crtc_state *crtc_state)
14396 {
14397         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14398
14399         if (!IS_GEN(dev_priv, 2))
14400                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
14401
14402         if (crtc_state->has_pch_encoder) {
14403                 enum pipe pch_transcoder =
14404                         intel_crtc_pch_transcoder(crtc);
14405
14406                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
14407         }
14408 }
14409
14410 static void intel_finish_crtc_commit(struct intel_atomic_state *state,
14411                                      struct intel_crtc *crtc)
14412 {
14413         struct intel_crtc_state *old_crtc_state =
14414                 intel_atomic_get_old_crtc_state(state, crtc);
14415         struct intel_crtc_state *new_crtc_state =
14416                 intel_atomic_get_new_crtc_state(state, crtc);
14417
14418         intel_pipe_update_end(new_crtc_state);
14419
14420         if (new_crtc_state->update_pipe &&
14421             !needs_modeset(new_crtc_state) &&
14422             old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
14423                 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
14424 }
14425
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);

	drm_plane_cleanup(plane);
	kfree(intel_plane);
}
14438
14439 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
14440                                             u32 format, u64 modifier)
14441 {
14442         switch (modifier) {
14443         case DRM_FORMAT_MOD_LINEAR:
14444         case I915_FORMAT_MOD_X_TILED:
14445                 break;
14446         default:
14447                 return false;
14448         }
14449
14450         switch (format) {
14451         case DRM_FORMAT_C8:
14452         case DRM_FORMAT_RGB565:
14453         case DRM_FORMAT_XRGB1555:
14454         case DRM_FORMAT_XRGB8888:
14455                 return modifier == DRM_FORMAT_MOD_LINEAR ||
14456                         modifier == I915_FORMAT_MOD_X_TILED;
14457         default:
14458                 return false;
14459         }
14460 }
14461
14462 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
14463                                             u32 format, u64 modifier)
14464 {
14465         switch (modifier) {
14466         case DRM_FORMAT_MOD_LINEAR:
14467         case I915_FORMAT_MOD_X_TILED:
14468                 break;
14469         default:
14470                 return false;
14471         }
14472
14473         switch (format) {
14474         case DRM_FORMAT_C8:
14475         case DRM_FORMAT_RGB565:
14476         case DRM_FORMAT_XRGB8888:
14477         case DRM_FORMAT_XBGR8888:
14478         case DRM_FORMAT_XRGB2101010:
14479         case DRM_FORMAT_XBGR2101010:
14480                 return modifier == DRM_FORMAT_MOD_LINEAR ||
14481                         modifier == I915_FORMAT_MOD_X_TILED;
14482         default:
14483                 return false;
14484         }
14485 }
14486
14487 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
14488                                               u32 format, u64 modifier)
14489 {
14490         return modifier == DRM_FORMAT_MOD_LINEAR &&
14491                 format == DRM_FORMAT_ARGB8888;
14492 }
14493
/* drm_plane vfuncs for gen4+ (i965 and later) pre-gen9 primary planes */
static const struct drm_plane_funcs i965_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i965_plane_format_mod_supported,
};
14502
/* drm_plane vfuncs for gen2/3 primary planes */
static const struct drm_plane_funcs i8xx_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = i8xx_plane_format_mod_supported,
};
14511
/*
 * Fastpath for the legacy cursor ioctl: update just the cursor plane
 * without building a full atomic commit. Any condition that could
 * interact with other pending state punts to the slowpath
 * (drm_atomic_helper_update_plane()).
 */
static int
intel_legacy_cursor_update(struct drm_plane *plane,
			   struct drm_crtc *crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   u32 src_x, u32 src_y,
			   u32 src_w, u32 src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	int ret;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_framebuffer *old_fb;
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->state);
	struct intel_crtc_state *new_crtc_state;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->base.active || needs_modeset(crtc_state) ||
	    crtc_state->update_pipe)
		goto slow;

	old_plane_state = plane->state;
	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane.  This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->commit &&
	    !try_wait_for_completion(&old_plane_state->commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->crtc != crtc ||
	    old_plane_state->src_w != src_w ||
	    old_plane_state->src_h != src_h ||
	    old_plane_state->crtc_w != crtc_w ||
	    old_plane_state->crtc_h != crtc_h ||
	    !old_plane_state->fb != !fb)
		goto slow;

	new_plane_state = intel_plane_duplicate_state(plane);
	if (!new_plane_state)
		return -ENOMEM;

	/* Scratch crtc state, only used for the check below. */
	new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
	if (!new_crtc_state) {
		ret = -ENOMEM;
		goto out_free;
	}

	drm_atomic_set_fb_for_plane(new_plane_state, fb);

	new_plane_state->src_x = src_x;
	new_plane_state->src_y = src_y;
	new_plane_state->src_w = src_w;
	new_plane_state->src_h = src_h;
	new_plane_state->crtc_x = crtc_x;
	new_plane_state->crtc_y = crtc_y;
	new_plane_state->crtc_w = crtc_w;
	new_plane_state->crtc_h = crtc_h;

	/* Validate the new plane state before touching any hardware. */
	ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
						  to_intel_plane_state(old_plane_state),
						  to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_free;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret)
		goto out_free;

	/* Pin the new fb; the old one is unpinned after the flip below. */
	ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_unlock;

	intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);

	old_fb = old_plane_state->fb;
	i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
			  intel_plane->frontbuffer_bit);

	/* Swap plane state */
	plane->state = new_plane_state;

	/*
	 * We cannot swap crtc_state as it may be in use by an atomic commit or
	 * page flip that's running simultaneously. If we swap crtc_state and
	 * destroy the old state, we will cause a use-after-free there.
	 *
	 * Only update active_planes, which is needed for our internal
	 * bookkeeping. Either value will do the right thing when updating
	 * planes atomically. If the cursor was part of the atomic update then
	 * we would have taken the slowpath.
	 */
	crtc_state->active_planes = new_crtc_state->active_planes;

	if (plane->state->visible)
		intel_update_plane(intel_plane, crtc_state,
				   to_intel_plane_state(plane->state));
	else
		intel_disable_plane(intel_plane, crtc_state);

	intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
out_free:
	/* The duplicated crtc state was only needed for the atomic check. */
	if (new_crtc_state)
		intel_crtc_destroy_state(crtc, &new_crtc_state->base);
	/* On failure the new plane state is ours to free, on success the old. */
	if (ret)
		intel_plane_destroy_state(plane, new_plane_state);
	else
		intel_plane_destroy_state(plane, old_plane_state);
	return ret;

slow:
	return drm_atomic_helper_update_plane(plane, crtc, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
14642
/* drm_plane vfuncs for cursor planes; update_plane takes the fastpath. */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};
14651
14652 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
14653                                enum i9xx_plane_id i9xx_plane)
14654 {
14655         if (!HAS_FBC(dev_priv))
14656                 return false;
14657
14658         if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
14659                 return i9xx_plane == PLANE_A; /* tied to pipe A */
14660         else if (IS_IVYBRIDGE(dev_priv))
14661                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
14662                         i9xx_plane == PLANE_C;
14663         else if (INTEL_GEN(dev_priv) >= 4)
14664                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
14665         else
14666                 return i9xx_plane == PLANE_A;
14667 }
14668
14669 static struct intel_plane *
14670 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
14671 {
14672         struct intel_plane *plane;
14673         const struct drm_plane_funcs *plane_funcs;
14674         unsigned int supported_rotations;
14675         unsigned int possible_crtcs;
14676         const u64 *modifiers;
14677         const u32 *formats;
14678         int num_formats;
14679         int ret;
14680
14681         if (INTEL_GEN(dev_priv) >= 9)
14682                 return skl_universal_plane_create(dev_priv, pipe,
14683                                                   PLANE_PRIMARY);
14684
14685         plane = intel_plane_alloc();
14686         if (IS_ERR(plane))
14687                 return plane;
14688
14689         plane->pipe = pipe;
14690         /*
14691          * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
14692          * port is hooked to pipe B. Hence we want plane A feeding pipe B.
14693          */
14694         if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
14695                 plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
14696         else
14697                 plane->i9xx_plane = (enum i9xx_plane_id) pipe;
14698         plane->id = PLANE_PRIMARY;
14699         plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
14700
14701         plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
14702         if (plane->has_fbc) {
14703                 struct intel_fbc *fbc = &dev_priv->fbc;
14704
14705                 fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
14706         }
14707
14708         if (INTEL_GEN(dev_priv) >= 4) {
14709                 formats = i965_primary_formats;
14710                 num_formats = ARRAY_SIZE(i965_primary_formats);
14711                 modifiers = i9xx_format_modifiers;
14712
14713                 plane->max_stride = i9xx_plane_max_stride;
14714                 plane->update_plane = i9xx_update_plane;
14715                 plane->disable_plane = i9xx_disable_plane;
14716                 plane->get_hw_state = i9xx_plane_get_hw_state;
14717                 plane->check_plane = i9xx_plane_check;
14718
14719                 plane_funcs = &i965_plane_funcs;
14720         } else {
14721                 formats = i8xx_primary_formats;
14722                 num_formats = ARRAY_SIZE(i8xx_primary_formats);
14723                 modifiers = i9xx_format_modifiers;
14724
14725                 plane->max_stride = i9xx_plane_max_stride;
14726                 plane->update_plane = i9xx_update_plane;
14727                 plane->disable_plane = i9xx_disable_plane;
14728                 plane->get_hw_state = i9xx_plane_get_hw_state;
14729                 plane->check_plane = i9xx_plane_check;
14730
14731                 plane_funcs = &i8xx_plane_funcs;
14732         }
14733
14734         possible_crtcs = BIT(pipe);
14735
14736         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
14737                 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
14738                                                possible_crtcs, plane_funcs,
14739                                                formats, num_formats, modifiers,
14740                                                DRM_PLANE_TYPE_PRIMARY,
14741                                                "primary %c", pipe_name(pipe));
14742         else
14743                 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
14744                                                possible_crtcs, plane_funcs,
14745                                                formats, num_formats, modifiers,
14746                                                DRM_PLANE_TYPE_PRIMARY,
14747                                                "plane %c",
14748                                                plane_name(plane->i9xx_plane));
14749         if (ret)
14750                 goto fail;
14751
14752         if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
14753                 supported_rotations =
14754                         DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
14755                         DRM_MODE_REFLECT_X;
14756         } else if (INTEL_GEN(dev_priv) >= 4) {
14757                 supported_rotations =
14758                         DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
14759         } else {
14760                 supported_rotations = DRM_MODE_ROTATE_0;
14761         }
14762
14763         if (INTEL_GEN(dev_priv) >= 4)
14764                 drm_plane_create_rotation_property(&plane->base,
14765                                                    DRM_MODE_ROTATE_0,
14766                                                    supported_rotations);
14767
14768         drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
14769
14770         return plane;
14771
14772 fail:
14773         intel_plane_free(plane);
14774
14775         return ERR_PTR(ret);
14776 }
14777
14778 static struct intel_plane *
14779 intel_cursor_plane_create(struct drm_i915_private *dev_priv,
14780                           enum pipe pipe)
14781 {
14782         unsigned int possible_crtcs;
14783         struct intel_plane *cursor;
14784         int ret;
14785
14786         cursor = intel_plane_alloc();
14787         if (IS_ERR(cursor))
14788                 return cursor;
14789
14790         cursor->pipe = pipe;
14791         cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
14792         cursor->id = PLANE_CURSOR;
14793         cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
14794
14795         if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
14796                 cursor->max_stride = i845_cursor_max_stride;
14797                 cursor->update_plane = i845_update_cursor;
14798                 cursor->disable_plane = i845_disable_cursor;
14799                 cursor->get_hw_state = i845_cursor_get_hw_state;
14800                 cursor->check_plane = i845_check_cursor;
14801         } else {
14802                 cursor->max_stride = i9xx_cursor_max_stride;
14803                 cursor->update_plane = i9xx_update_cursor;
14804                 cursor->disable_plane = i9xx_disable_cursor;
14805                 cursor->get_hw_state = i9xx_cursor_get_hw_state;
14806                 cursor->check_plane = i9xx_check_cursor;
14807         }
14808
14809         cursor->cursor.base = ~0;
14810         cursor->cursor.cntl = ~0;
14811
14812         if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
14813                 cursor->cursor.size = ~0;
14814
14815         possible_crtcs = BIT(pipe);
14816
14817         ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
14818                                        possible_crtcs, &intel_cursor_plane_funcs,
14819                                        intel_cursor_formats,
14820                                        ARRAY_SIZE(intel_cursor_formats),
14821                                        cursor_format_modifiers,
14822                                        DRM_PLANE_TYPE_CURSOR,
14823                                        "cursor %c", pipe_name(pipe));
14824         if (ret)
14825                 goto fail;
14826
14827         if (INTEL_GEN(dev_priv) >= 4)
14828                 drm_plane_create_rotation_property(&cursor->base,
14829                                                    DRM_MODE_ROTATE_0,
14830                                                    DRM_MODE_ROTATE_0 |
14831                                                    DRM_MODE_ROTATE_180);
14832
14833         drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
14834
14835         return cursor;
14836
14837 fail:
14838         intel_plane_free(cursor);
14839
14840         return ERR_PTR(ret);
14841 }
14842
14843 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
14844                                     struct intel_crtc_state *crtc_state)
14845 {
14846         struct intel_crtc_scaler_state *scaler_state =
14847                 &crtc_state->scaler_state;
14848         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14849         int i;
14850
14851         crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
14852         if (!crtc->num_scalers)
14853                 return;
14854
14855         for (i = 0; i < crtc->num_scalers; i++) {
14856                 struct intel_scaler *scaler = &scaler_state->scalers[i];
14857
14858                 scaler->in_use = 0;
14859                 scaler->mode = 0;
14860         }
14861
14862         scaler_state->scaler_id = -1;
14863 }
14864
/*
 * drm_crtc_funcs entries shared by every platform; the per-platform
 * tables below add only the vblank hooks.
 */
#define INTEL_CRTC_FUNCS \
	.gamma_set = drm_atomic_helper_legacy_gamma_set, \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources
14875
/* Per-platform crtc vfuncs: only the vblank hooks differ. */
static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
};

static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
};

static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
};

static const struct drm_crtc_funcs i965_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
};

static const struct drm_crtc_funcs i945gm_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i945gm_enable_vblank,
	.disable_vblank = i945gm_disable_vblank,
};

static const struct drm_crtc_funcs i915_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
};

static const struct drm_crtc_funcs i8xx_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	/* no hw vblank counter */
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
};
14931
/*
 * Create and register the crtc for @pipe together with its primary,
 * sprite and cursor planes, and pick the right per-platform vfuncs.
 * Returns 0 on success or a negative errno.
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	const struct drm_crtc_funcs *funcs;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct intel_plane *primary = NULL;
	struct intel_plane *cursor = NULL;
	int sprite, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (!intel_crtc)
		return -ENOMEM;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state) {
		ret = -ENOMEM;
		goto fail;
	}
	__drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base);
	intel_crtc->config = crtc_state;

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		intel_crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(cursor->id);

	/* Pick the vfunc table matching this platform's vblank hardware. */
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv) ||
		    IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
			funcs = &g4x_crtc_funcs;
		else if (IS_GEN(dev_priv, 4))
			funcs = &i965_crtc_funcs;
		else if (IS_I945GM(dev_priv))
			funcs = &i945gm_crtc_funcs;
		else if (IS_GEN(dev_priv, 3))
			funcs = &i915_crtc_funcs;
		else
			funcs = &i8xx_crtc_funcs;
	} else {
		if (INTEL_GEN(dev_priv) >= 8)
			funcs = &bdw_crtc_funcs;
		else
			funcs = &ilk_crtc_funcs;
	}

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
					&primary->base, &cursor->base,
					funcs, "pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	intel_crtc->pipe = pipe;

	/* initialize shared scalers */
	intel_crtc_init_scalers(intel_crtc, crtc_state);

	/* Each pipe must map to exactly one crtc. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;

	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_color_init(intel_crtc);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

	return 0;

fail:
	/*
	 * drm_mode_config_cleanup() will free up any
	 * crtcs/planes already initialized.
	 */
	kfree(crtc_state);
	kfree(intel_crtc);

	return ret;
}
15038
15039 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
15040                                       struct drm_file *file)
15041 {
15042         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
15043         struct drm_crtc *drmmode_crtc;
15044         struct intel_crtc *crtc;
15045
15046         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
15047         if (!drmmode_crtc)
15048                 return -ENOENT;
15049
15050         crtc = to_intel_crtc(drmmode_crtc);
15051         pipe_from_crtc_id->pipe = crtc->pipe;
15052
15053         return 0;
15054 }
15055
15056 static int intel_encoder_clones(struct intel_encoder *encoder)
15057 {
15058         struct drm_device *dev = encoder->base.dev;
15059         struct intel_encoder *source_encoder;
15060         int index_mask = 0;
15061         int entry = 0;
15062
15063         for_each_intel_encoder(dev, source_encoder) {
15064                 if (encoders_cloneable(encoder, source_encoder))
15065                         index_mask |= (1 << entry);
15066
15067                 entry++;
15068         }
15069
15070         return index_mask;
15071 }
15072
15073 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
15074 {
15075         if (!IS_MOBILE(dev_priv))
15076                 return false;
15077
15078         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
15079                 return false;
15080
15081         if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
15082                 return false;
15083
15084         return true;
15085 }
15086
15087 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
15088 {
15089         if (INTEL_GEN(dev_priv) >= 9)
15090                 return false;
15091
15092         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
15093                 return false;
15094
15095         if (HAS_PCH_LPT_H(dev_priv) &&
15096             I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
15097                 return false;
15098
15099         /* DDI E can't be used if DDI A requires 4 lanes */
15100         if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
15101                 return false;
15102
15103         if (!dev_priv->vbt.int_crt_support)
15104                 return false;
15105
15106         return true;
15107 }
15108
15109 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
15110 {
15111         int pps_num;
15112         int pps_idx;
15113
15114         if (HAS_DDI(dev_priv))
15115                 return;
15116         /*
15117          * This w/a is needed at least on CPT/PPT, but to be sure apply it
15118          * everywhere where registers can be write protected.
15119          */
15120         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15121                 pps_num = 2;
15122         else
15123                 pps_num = 1;
15124
15125         for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
15126                 u32 val = I915_READ(PP_CONTROL(pps_idx));
15127
15128                 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
15129                 I915_WRITE(PP_CONTROL(pps_idx), val);
15130         }
15131 }
15132
15133 static void intel_pps_init(struct drm_i915_private *dev_priv)
15134 {
15135         if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
15136                 dev_priv->pps_mmio_base = PCH_PPS_BASE;
15137         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15138                 dev_priv->pps_mmio_base = VLV_PPS_BASE;
15139         else
15140                 dev_priv->pps_mmio_base = PPS_BASE;
15141
15142         intel_pps_unlock_regs_wa(dev_priv);
15143 }
15144
/*
 * Probe and register every display output encoder (DDI, DP, HDMI, SDVO,
 * LVDS, CRT, DSI, TV, DVO) appropriate for this platform. Presence is
 * detected from hardware strap/fuse registers and/or VBT data, with
 * per-platform workarounds for straps known to be unreliable. Must run
 * before anything that iterates the encoder list.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_init(dev_priv);

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (IS_ELKHARTLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		icl_dsi_init(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 11) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		/*
		 * On some ICL SKUs port F is not present. No strap bits for
		 * this, so rely on VBT.
		 * Work around broken VBTs on SKUs known to have no port F.
		 */
		if (IS_ICL_WITH_PORT_F(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_F))
			intel_ddi_init(dev_priv, PORT_F);

		icl_dsi_init(dev_priv);
	} else if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		vlv_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		if (intel_ddi_crt_present(dev_priv))
			intel_crt_init(dev_priv);

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    intel_bios_is_port_present(dev_priv, PORT_E))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;

		/*
		 * intel_edp_init_connector() depends on this completing first,
		 * to prevent the registration of both eDP and LVDS and the
		 * incorrect sharing of the PPS.
		 */
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);

		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (ilk_has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		/* Skip HDMI D when the VBT declares port D as eDP. */
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
			intel_crt_init(dev_priv);

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (IS_PINEVIEW(dev_priv)) {
		intel_lvds_init(dev_priv);
		intel_crt_init(dev_priv);
	} else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		bool found = false;

		if (IS_MOBILE(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev_priv, DP_D, PORT_D);

		if (SUPPORTS_TV(dev_priv))
			intel_tv_init(dev_priv);
	} else if (IS_GEN(dev_priv, 2)) {
		if (IS_I85X(dev_priv))
			intel_lvds_init(dev_priv);

		intel_crt_init(dev_priv);
		intel_dvo_init(dev_priv);
	}

	intel_psr_init(dev_priv);

	/*
	 * All encoders are registered now; publish which CRTCs each can
	 * drive and which other encoders it can be cloned with.
	 */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
15371
15372 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
15373 {
15374         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
15375         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15376
15377         drm_framebuffer_cleanup(fb);
15378
15379         i915_gem_object_lock(obj);
15380         WARN_ON(!obj->framebuffer_references--);
15381         i915_gem_object_unlock(obj);
15382
15383         i915_gem_object_put(obj);
15384
15385         kfree(intel_fb);
15386 }
15387
15388 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
15389                                                 struct drm_file *file,
15390                                                 unsigned int *handle)
15391 {
15392         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15393
15394         if (obj->userptr.mm) {
15395                 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
15396                 return -EINVAL;
15397         }
15398
15399         return drm_gem_handle_create(file, &obj->base, handle);
15400 }
15401
15402 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
15403                                         struct drm_file *file,
15404                                         unsigned flags, unsigned color,
15405                                         struct drm_clip_rect *clips,
15406                                         unsigned num_clips)
15407 {
15408         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
15409
15410         i915_gem_object_flush_if_display(obj);
15411         intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
15412
15413         return 0;
15414 }
15415
/* Framebuffer vfuncs for userspace-created (addfb) framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
15421
/*
 * Validate a framebuffer request against the object's tiling state and
 * the hardware's format/stride/offset constraints, then initialize
 * @intel_fb around @obj. Takes a framebuffer_references pin on @obj for
 * the lifetime of the fb (dropped again on any error path). Returns 0 on
 * success or a negative error code.
 */
static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_framebuffer *fb = &intel_fb->base;
	u32 max_stride;
	unsigned int tiling, stride;
	int ret = -EINVAL;
	int i;

	/*
	 * Pin the tiling state and snapshot it under the object lock so
	 * the checks below are made against a consistent view.
	 */
	i915_gem_object_lock(obj);
	obj->framebuffer_references++;
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/*
		 * If there's a fence, enforce that
		 * the fb modifier and tiling mode match.
		 */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
			DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
			goto err;
		}
	} else {
		/* Legacy addfb: derive the modifier from the tiling mode. */
		if (tiling == I915_TILING_X) {
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		} else if (tiling == I915_TILING_Y) {
			DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
			goto err;
		}
	}

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		struct drm_format_name_buf format_name;

		DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
			      drm_get_format_name(mode_cmd->pixel_format,
						  &format_name),
			      mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
					 mode_cmd->modifier[0]);
	if (mode_cmd->pitches[0] > max_stride) {
		DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
			      mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			      "tiled" : "linear",
			      mode_cmd->pitches[0], max_stride);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
			      mode_cmd->pitches[0], stride);
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		goto err;

	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);

	for (i = 0; i < fb->format->num_planes; i++) {
		u32 stride_alignment;

		/* All planes must share the single backing object. */
		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			DRM_DEBUG_KMS("bad plane %d handle\n", i);
			goto err;
		}

		stride_alignment = intel_fb_stride_alignment(fb, i);

		/*
		 * Display WA #0531: skl,bxt,kbl,glk
		 *
		 * Render decompression and plane width > 3840
		 * combined with horizontal panning requires the
		 * plane stride to be a multiple of 4. We'll just
		 * require the entire fb to accommodate that to avoid
		 * potential runtime errors at plane configuration time.
		 */
		if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
		    is_ccs_modifier(fb->modifier))
			stride_alignment *= 4;

		if (fb->pitches[i] & (stride_alignment - 1)) {
			DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
				      i, fb->pitches[i], stride_alignment);
			goto err;
		}

		fb->obj[i] = &obj->base;
	}

	ret = intel_fill_fb_info(dev_priv, fb);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	/* Undo the framebuffer_references pin taken above. */
	i915_gem_object_lock(obj);
	obj->framebuffer_references--;
	i915_gem_object_unlock(obj);
	return ret;
}
15556
15557 static struct drm_framebuffer *
15558 intel_user_framebuffer_create(struct drm_device *dev,
15559                               struct drm_file *filp,
15560                               const struct drm_mode_fb_cmd2 *user_mode_cmd)
15561 {
15562         struct drm_framebuffer *fb;
15563         struct drm_i915_gem_object *obj;
15564         struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
15565
15566         obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
15567         if (!obj)
15568                 return ERR_PTR(-ENOENT);
15569
15570         fb = intel_framebuffer_create(obj, &mode_cmd);
15571         if (IS_ERR(fb))
15572                 i915_gem_object_put(obj);
15573
15574         return fb;
15575 }
15576
15577 static void intel_atomic_state_free(struct drm_atomic_state *state)
15578 {
15579         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
15580
15581         drm_atomic_state_default_release(state);
15582
15583         i915_sw_fence_fini(&intel_state->commit_ready);
15584
15585         kfree(state);
15586 }
15587
15588 static enum drm_mode_status
15589 intel_mode_valid(struct drm_device *dev,
15590                  const struct drm_display_mode *mode)
15591 {
15592         struct drm_i915_private *dev_priv = to_i915(dev);
15593         int hdisplay_max, htotal_max;
15594         int vdisplay_max, vtotal_max;
15595
15596         /*
15597          * Can't reject DBLSCAN here because Xorg ddxen can add piles
15598          * of DBLSCAN modes to the output's mode list when they detect
15599          * the scaling mode property on the connector. And they don't
15600          * ask the kernel to validate those modes in any way until
15601          * modeset time at which point the client gets a protocol error.
15602          * So in order to not upset those clients we silently ignore the
15603          * DBLSCAN flag on such connectors. For other connectors we will
15604          * reject modes with the DBLSCAN flag in encoder->compute_config().
15605          * And we always reject DBLSCAN modes in connector->mode_valid()
15606          * as we never want such modes on the connector's mode list.
15607          */
15608
15609         if (mode->vscan > 1)
15610                 return MODE_NO_VSCAN;
15611
15612         if (mode->flags & DRM_MODE_FLAG_HSKEW)
15613                 return MODE_H_ILLEGAL;
15614
15615         if (mode->flags & (DRM_MODE_FLAG_CSYNC |
15616                            DRM_MODE_FLAG_NCSYNC |
15617                            DRM_MODE_FLAG_PCSYNC))
15618                 return MODE_HSYNC;
15619
15620         if (mode->flags & (DRM_MODE_FLAG_BCAST |
15621                            DRM_MODE_FLAG_PIXMUX |
15622                            DRM_MODE_FLAG_CLKDIV2))
15623                 return MODE_BAD;
15624
15625         if (INTEL_GEN(dev_priv) >= 9 ||
15626             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
15627                 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
15628                 vdisplay_max = 4096;
15629                 htotal_max = 8192;
15630                 vtotal_max = 8192;
15631         } else if (INTEL_GEN(dev_priv) >= 3) {
15632                 hdisplay_max = 4096;
15633                 vdisplay_max = 4096;
15634                 htotal_max = 8192;
15635                 vtotal_max = 8192;
15636         } else {
15637                 hdisplay_max = 2048;
15638                 vdisplay_max = 2048;
15639                 htotal_max = 4096;
15640                 vtotal_max = 4096;
15641         }
15642
15643         if (mode->hdisplay > hdisplay_max ||
15644             mode->hsync_start > htotal_max ||
15645             mode->hsync_end > htotal_max ||
15646             mode->htotal > htotal_max)
15647                 return MODE_H_ILLEGAL;
15648
15649         if (mode->vdisplay > vdisplay_max ||
15650             mode->vsync_start > vtotal_max ||
15651             mode->vsync_end > vtotal_max ||
15652             mode->vtotal > vtotal_max)
15653                 return MODE_V_ILLEGAL;
15654
15655         return MODE_OK;
15656 }
15657
/* Top-level KMS mode-config vfuncs for the i915 driver. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
15669
15670 /**
15671  * intel_init_display_hooks - initialize the display modesetting hooks
15672  * @dev_priv: device private
15673  */
15674 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
15675 {
15676         intel_init_cdclk_hooks(dev_priv);
15677
15678         if (INTEL_GEN(dev_priv) >= 9) {
15679                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15680                 dev_priv->display.get_initial_plane_config =
15681                         skylake_get_initial_plane_config;
15682                 dev_priv->display.crtc_compute_clock =
15683                         haswell_crtc_compute_clock;
15684                 dev_priv->display.crtc_enable = haswell_crtc_enable;
15685                 dev_priv->display.crtc_disable = haswell_crtc_disable;
15686         } else if (HAS_DDI(dev_priv)) {
15687                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15688                 dev_priv->display.get_initial_plane_config =
15689                         i9xx_get_initial_plane_config;
15690                 dev_priv->display.crtc_compute_clock =
15691                         haswell_crtc_compute_clock;
15692                 dev_priv->display.crtc_enable = haswell_crtc_enable;
15693                 dev_priv->display.crtc_disable = haswell_crtc_disable;
15694         } else if (HAS_PCH_SPLIT(dev_priv)) {
15695                 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
15696                 dev_priv->display.get_initial_plane_config =
15697                         i9xx_get_initial_plane_config;
15698                 dev_priv->display.crtc_compute_clock =
15699                         ironlake_crtc_compute_clock;
15700                 dev_priv->display.crtc_enable = ironlake_crtc_enable;
15701                 dev_priv->display.crtc_disable = ironlake_crtc_disable;
15702         } else if (IS_CHERRYVIEW(dev_priv)) {
15703                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15704                 dev_priv->display.get_initial_plane_config =
15705                         i9xx_get_initial_plane_config;
15706                 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
15707                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
15708                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15709         } else if (IS_VALLEYVIEW(dev_priv)) {
15710                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15711                 dev_priv->display.get_initial_plane_config =
15712                         i9xx_get_initial_plane_config;
15713                 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
15714                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
15715                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15716         } else if (IS_G4X(dev_priv)) {
15717                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15718                 dev_priv->display.get_initial_plane_config =
15719                         i9xx_get_initial_plane_config;
15720                 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
15721                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15722                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15723         } else if (IS_PINEVIEW(dev_priv)) {
15724                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15725                 dev_priv->display.get_initial_plane_config =
15726                         i9xx_get_initial_plane_config;
15727                 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
15728                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15729                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15730         } else if (!IS_GEN(dev_priv, 2)) {
15731                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15732                 dev_priv->display.get_initial_plane_config =
15733                         i9xx_get_initial_plane_config;
15734                 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
15735                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15736                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15737         } else {
15738                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15739                 dev_priv->display.get_initial_plane_config =
15740                         i9xx_get_initial_plane_config;
15741                 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
15742                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15743                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15744         }
15745
15746         if (IS_GEN(dev_priv, 5)) {
15747                 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
15748         } else if (IS_GEN(dev_priv, 6)) {
15749                 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
15750         } else if (IS_IVYBRIDGE(dev_priv)) {
15751                 /* FIXME: detect B0+ stepping and use auto training */
15752                 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
15753         } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
15754                 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
15755         }
15756
15757         if (INTEL_GEN(dev_priv) >= 9)
15758                 dev_priv->display.update_crtcs = skl_update_crtcs;
15759         else
15760                 dev_priv->display.update_crtcs = intel_update_crtcs;
15761 }
15762
15763 static i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
15764 {
15765         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15766                 return VLV_VGACNTRL;
15767         else if (INTEL_GEN(dev_priv) >= 5)
15768                 return CPU_VGACNTRL;
15769         else
15770                 return VGACNTRL;
15771 }
15772
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	/* Set bit 5 of VGA sequencer register SR01 (screen off). */
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);
	/* Delay before disabling the plane; presumably lets the
	 * sequencer write settle — no spec reference visible here. */
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	/* Posting read flushes the write before we return. */
	POSTING_READ(vga_reg);
}
15791
15792 void intel_modeset_init_hw(struct drm_device *dev)
15793 {
15794         struct drm_i915_private *dev_priv = to_i915(dev);
15795
15796         intel_update_cdclk(dev_priv);
15797         intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
15798         dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
15799 }
15800
/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *state;
        struct intel_atomic_state *intel_state;
        struct intel_crtc *crtc;
        struct intel_crtc_state *crtc_state;
        struct drm_modeset_acquire_ctx ctx;
        int ret;
        int i;

        /* Only supported on platforms that use atomic watermark design */
        if (!dev_priv->display.optimize_watermarks)
                return;

        /*
         * We need to hold connection_mutex before calling duplicate_state so
         * that the connector loop is protected.
         */
        drm_modeset_acquire_init(&ctx, 0);
retry:
        ret = drm_modeset_lock_all_ctx(dev, &ctx);
        if (ret == -EDEADLK) {
                /* Deadlock with another acquire context: back off and retry. */
                drm_modeset_backoff(&ctx);
                goto retry;
        } else if (WARN_ON(ret)) {
                goto fail;
        }

        /* Duplicate the state read out from hardware; nothing to put on error. */
        state = drm_atomic_helper_duplicate_state(dev, &ctx);
        if (WARN_ON(IS_ERR(state)))
                goto fail;

        intel_state = to_intel_atomic_state(state);

        /*
         * Hardware readout is the only time we don't want to calculate
         * intermediate watermarks (since we don't trust the current
         * watermarks).
         */
        if (!HAS_GMCH(dev_priv))
                intel_state->skip_intermediate_wm = true;

        ret = intel_atomic_check(dev, state);
        if (ret) {
                /*
                 * If we fail here, it means that the hardware appears to be
                 * programmed in a way that shouldn't be possible, given our
                 * understanding of watermark requirements.  This might mean a
                 * mistake in the hardware readout code or a mistake in the
                 * watermark calculations for a given platform.  Raise a WARN
                 * so that this is noticeable.
                 *
                 * If this actually happens, we'll have to just leave the
                 * BIOS-programmed watermarks untouched and hope for the best.
                 */
                WARN(true, "Could not determine valid watermarks for inherited state\n");
                goto put_state;
        }

        /* Write calculated watermark values back */
        for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
                crtc_state->wm.need_postvbl_update = true;
                dev_priv->display.optimize_watermarks(intel_state, crtc_state);

                /* Keep the committed software state in sync with what we programmed. */
                to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
        }

put_state:
        drm_atomic_state_put(state);
fail:
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
}
15885
15886 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
15887 {
15888         if (IS_GEN(dev_priv, 5)) {
15889                 u32 fdi_pll_clk =
15890                         I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
15891
15892                 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
15893         } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
15894                 dev_priv->fdi_pll_freq = 270000;
15895         } else {
15896                 return;
15897         }
15898
15899         DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
15900 }
15901
/*
 * Commit the state read out during driver load so that all active planes
 * recompute their derived state (avoids assert_plane warnings on the first
 * real modeset). Returns 0 on success or a negative error code.
 */
static int intel_initial_commit(struct drm_device *dev)
{
        struct drm_atomic_state *state = NULL;
        struct drm_modeset_acquire_ctx ctx;
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        int ret = 0;

        state = drm_atomic_state_alloc(dev);
        if (!state)
                return -ENOMEM;

        drm_modeset_acquire_init(&ctx, 0);

retry:
        state->acquire_ctx = &ctx;

        drm_for_each_crtc(crtc, dev) {
                /* Pull every CRTC into the commit (takes its lock). */
                crtc_state = drm_atomic_get_crtc_state(state, crtc);
                if (IS_ERR(crtc_state)) {
                        ret = PTR_ERR(crtc_state);
                        goto out;
                }

                if (crtc_state->active) {
                        ret = drm_atomic_add_affected_planes(state, crtc);
                        if (ret)
                                goto out;

                        /*
                         * FIXME hack to force a LUT update to avoid the
                         * plane update forcing the pipe gamma on without
                         * having a proper LUT loaded. Remove once we
                         * have readout for pipe gamma enable.
                         */
                        crtc_state->color_mgmt_changed = true;
                }
        }

        ret = drm_atomic_commit(state);

out:
        if (ret == -EDEADLK) {
                /* Lock contention: reset the state and retry the whole commit. */
                drm_atomic_state_clear(state);
                drm_modeset_backoff(&ctx);
                goto retry;
        }

        drm_atomic_state_put(state);

        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);

        return ret;
}
15957
15958 int intel_modeset_init(struct drm_device *dev)
15959 {
15960         struct drm_i915_private *dev_priv = to_i915(dev);
15961         struct i915_ggtt *ggtt = &dev_priv->ggtt;
15962         enum pipe pipe;
15963         struct intel_crtc *crtc;
15964         int ret;
15965
15966         dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
15967
15968         drm_mode_config_init(dev);
15969
15970         ret = intel_bw_init(dev_priv);
15971         if (ret)
15972                 return ret;
15973
15974         dev->mode_config.min_width = 0;
15975         dev->mode_config.min_height = 0;
15976
15977         dev->mode_config.preferred_depth = 24;
15978         dev->mode_config.prefer_shadow = 1;
15979
15980         dev->mode_config.allow_fb_modifiers = true;
15981
15982         dev->mode_config.funcs = &intel_mode_funcs;
15983
15984         init_llist_head(&dev_priv->atomic_helper.free_list);
15985         INIT_WORK(&dev_priv->atomic_helper.free_work,
15986                   intel_atomic_helper_free_state_worker);
15987
15988         intel_init_quirks(dev_priv);
15989
15990         intel_fbc_init(dev_priv);
15991
15992         intel_init_pm(dev_priv);
15993
15994         /*
15995          * There may be no VBT; and if the BIOS enabled SSC we can
15996          * just keep using it to avoid unnecessary flicker.  Whereas if the
15997          * BIOS isn't using it, don't assume it will work even if the VBT
15998          * indicates as much.
15999          */
16000         if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
16001                 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
16002                                             DREF_SSC1_ENABLE);
16003
16004                 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
16005                         DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
16006                                      bios_lvds_use_ssc ? "en" : "dis",
16007                                      dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
16008                         dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
16009                 }
16010         }
16011
16012         /*
16013          * Maximum framebuffer dimensions, chosen to match
16014          * the maximum render engine surface size on gen4+.
16015          */
16016         if (INTEL_GEN(dev_priv) >= 7) {
16017                 dev->mode_config.max_width = 16384;
16018                 dev->mode_config.max_height = 16384;
16019         } else if (INTEL_GEN(dev_priv) >= 4) {
16020                 dev->mode_config.max_width = 8192;
16021                 dev->mode_config.max_height = 8192;
16022         } else if (IS_GEN(dev_priv, 3)) {
16023                 dev->mode_config.max_width = 4096;
16024                 dev->mode_config.max_height = 4096;
16025         } else {
16026                 dev->mode_config.max_width = 2048;
16027                 dev->mode_config.max_height = 2048;
16028         }
16029
16030         if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
16031                 dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
16032                 dev->mode_config.cursor_height = 1023;
16033         } else if (IS_GEN(dev_priv, 2)) {
16034                 dev->mode_config.cursor_width = 64;
16035                 dev->mode_config.cursor_height = 64;
16036         } else {
16037                 dev->mode_config.cursor_width = 256;
16038                 dev->mode_config.cursor_height = 256;
16039         }
16040
16041         dev->mode_config.fb_base = ggtt->gmadr.start;
16042
16043         DRM_DEBUG_KMS("%d display pipe%s available.\n",
16044                       INTEL_INFO(dev_priv)->num_pipes,
16045                       INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
16046
16047         for_each_pipe(dev_priv, pipe) {
16048                 ret = intel_crtc_init(dev_priv, pipe);
16049                 if (ret) {
16050                         drm_mode_config_cleanup(dev);
16051                         return ret;
16052                 }
16053         }
16054
16055         intel_shared_dpll_init(dev);
16056         intel_update_fdi_pll_freq(dev_priv);
16057
16058         intel_update_czclk(dev_priv);
16059         intel_modeset_init_hw(dev);
16060
16061         intel_hdcp_component_init(dev_priv);
16062
16063         if (dev_priv->max_cdclk_freq == 0)
16064                 intel_update_max_cdclk(dev_priv);
16065
16066         /* Just disable it once at startup */
16067         i915_disable_vga(dev_priv);
16068         intel_setup_outputs(dev_priv);
16069
16070         drm_modeset_lock_all(dev);
16071         intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
16072         drm_modeset_unlock_all(dev);
16073
16074         for_each_intel_crtc(dev, crtc) {
16075                 struct intel_initial_plane_config plane_config = {};
16076
16077                 if (!crtc->active)
16078                         continue;
16079
16080                 /*
16081                  * Note that reserving the BIOS fb up front prevents us
16082                  * from stuffing other stolen allocations like the ring
16083                  * on top.  This prevents some ugliness at boot time, and
16084                  * can even allow for smooth boot transitions if the BIOS
16085                  * fb is large enough for the active pipe configuration.
16086                  */
16087                 dev_priv->display.get_initial_plane_config(crtc,
16088                                                            &plane_config);
16089
16090                 /*
16091                  * If the fb is shared between multiple heads, we'll
16092                  * just get the first one.
16093                  */
16094                 intel_find_initial_plane_obj(crtc, &plane_config);
16095         }
16096
16097         /*
16098          * Make sure hardware watermarks really match the state we read out.
16099          * Note that we need to do this after reconstructing the BIOS fb's
16100          * since the watermark calculation done here will use pstate->fb.
16101          */
16102         if (!HAS_GMCH(dev_priv))
16103                 sanitize_watermarks(dev);
16104
16105         /*
16106          * Force all active planes to recompute their states. So that on
16107          * mode_setcrtc after probe, all the intel_plane_state variables
16108          * are already calculated and there is no assert_plane warnings
16109          * during bootup.
16110          */
16111         ret = intel_initial_commit(dev);
16112         if (ret)
16113                 DRM_DEBUG_KMS("Initial commit in probe failed.\n");
16114
16115         return 0;
16116 }
16117
/*
 * Force-enable a pipe on i830 with a fixed 640x480@60 mode (quirk for
 * hardware that needs a pipe running). The register write ordering below
 * is deliberate; see the inline comments.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
        /* 640x480@60Hz, ~25175 kHz */
        struct dpll clock = {
                .m1 = 18,
                .m2 = 7,
                .p1 = 13,
                .p2 = 4,
                .n = 2,
        };
        u32 dpll, fp;
        int i;

        /* Sanity check: with a 48000 kHz refclk these dividers give 25154 kHz. */
        WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

        DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
                      pipe_name(pipe), clock.vco, clock.dot);

        fp = i9xx_dpll_compute_fp(&clock);
        dpll = DPLL_DVO_2X_MODE |
                DPLL_VGA_MODE_DIS |
                ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
                PLL_P2_DIVIDE_BY_4 |
                PLL_REF_INPUT_DREFCLK |
                DPLL_VCO_ENABLE;

        I915_WRITE(FP0(pipe), fp);
        I915_WRITE(FP1(pipe), fp);

        /* Program the fixed 640x480 timings (values are end-exclusive - 1). */
        I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
        I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
        I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
        I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
        I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
        I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
        I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

        /*
         * Apparently we need to have VGA mode enabled prior to changing
         * the P1/P2 dividers. Otherwise the DPLL will keep using the old
         * dividers, even though the register value does change.
         */
        I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
        I915_WRITE(DPLL(pipe), dpll);

        /* Wait for the clocks to stabilize. */
        POSTING_READ(DPLL(pipe));
        udelay(150);

        /* The pixel multiplier can only be updated once the
         * DPLL is enabled and the clocks are stable.
         *
         * So write it again.
         */
        I915_WRITE(DPLL(pipe), dpll);

        /* We do this three times for luck */
        for (i = 0; i < 3 ; i++) {
                I915_WRITE(DPLL(pipe), dpll);
                POSTING_READ(DPLL(pipe));
                udelay(150); /* wait for warmup */
        }

        I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
        POSTING_READ(PIPECONF(pipe));

        /* Confirm the pipe is actually running before returning. */
        intel_wait_for_pipe_scanline_moving(crtc);
}
16187
/*
 * Disable a pipe that was force-enabled by the i830 quirk. All planes and
 * cursors must already be off (WARNed below) before the pipe and its DPLL
 * are shut down.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

        DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
                      pipe_name(pipe));

        /* Nothing should still be scanning out of this pipe. */
        WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
        WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);

        I915_WRITE(PIPECONF(pipe), 0);
        POSTING_READ(PIPECONF(pipe));

        /* Wait until scanout has actually stopped before killing the DPLL. */
        intel_wait_for_pipe_scanline_stopped(crtc);

        I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
        POSTING_READ(DPLL(pipe));
}
16209
16210 static void
16211 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
16212 {
16213         struct intel_crtc *crtc;
16214
16215         if (INTEL_GEN(dev_priv) >= 4)
16216                 return;
16217
16218         for_each_intel_crtc(&dev_priv->drm, crtc) {
16219                 struct intel_plane *plane =
16220                         to_intel_plane(crtc->base.primary);
16221                 struct intel_crtc *plane_crtc;
16222                 enum pipe pipe;
16223
16224                 if (!plane->get_hw_state(plane, &pipe))
16225                         continue;
16226
16227                 if (pipe == crtc->pipe)
16228                         continue;
16229
16230                 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
16231                               plane->base.base.id, plane->base.name);
16232
16233                 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16234                 intel_plane_disable_noatomic(plane_crtc, plane);
16235         }
16236 }
16237
/* Return true if at least one encoder is currently linked to this crtc. */
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct intel_encoder *encoder;

        /* The loop body runs at most once: any match means "has encoders". */
        for_each_encoder_on_crtc(dev, &crtc->base, encoder)
                return true;

        return false;
}
16248
/*
 * Return the first connector currently routed to this encoder, or NULL if
 * none is. If several connectors share the encoder only the first is
 * returned.
 */
static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
        struct drm_device *dev = encoder->base.dev;
        struct intel_connector *connector;

        /* The loop body runs at most once: return the first match. */
        for_each_connector_on_encoder(dev, &encoder->base, connector)
                return connector;

        return NULL;
}
16259
16260 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
16261                               enum pipe pch_transcoder)
16262 {
16263         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
16264                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
16265 }
16266
/*
 * Sanitize a single crtc's state after hardware readout: clear BIOS frame
 * start delays, disable everything but the primary plane and any BIOS
 * background color, shut the pipe down if it has no encoders, and set up
 * the FIFO underrun reporting bookkeeping.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
                                struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        /* Clear any frame start delays used for debugging left by the BIOS */
        if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
                i915_reg_t reg = PIPECONF(cpu_transcoder);

                I915_WRITE(reg,
                           I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
        }

        if (crtc_state->base.active) {
                struct intel_plane *plane;

                /* Disable everything but the primary plane */
                for_each_intel_plane_on_crtc(dev, crtc, plane) {
                        const struct intel_plane_state *plane_state =
                                to_intel_plane_state(plane->base.state);

                        if (plane_state->base.visible &&
                            plane->base.type != DRM_PLANE_TYPE_PRIMARY)
                                intel_plane_disable_noatomic(crtc, plane);
                }

                /*
                 * Disable any background color set by the BIOS, but enable the
                 * gamma and CSC to match how we program our planes.
                 */
                if (INTEL_GEN(dev_priv) >= 9)
                        I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
                                   SKL_BOTTOM_COLOR_GAMMA_ENABLE |
                                   SKL_BOTTOM_COLOR_CSC_ENABLE);
        }

        /* Adjust the state of the output pipe according to whether we
         * have active connectors/encoders. */
        if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
                intel_crtc_disable_noatomic(&crtc->base, ctx);

        if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
                /*
                 * We start out with underrun reporting disabled to avoid races.
                 * For correct bookkeeping mark this on active crtcs.
                 *
                 * Also on gmch platforms we dont have any hardware bits to
                 * disable the underrun reporting. Which means we need to start
                 * out with underrun reporting disabled also on inactive pipes,
                 * since otherwise we'll complain about the garbage we read when
                 * e.g. coming up after runtime pm.
                 *
                 * No protection against concurrent access is required - at
                 * worst a fifo underrun happens which also sets this to false.
                 */
                crtc->cpu_fifo_underrun_disabled = true;
                /*
                 * We track the PCH trancoder underrun reporting state
                 * within the crtc. With crtc for pipe A housing the underrun
                 * reporting state for PCH transcoder A, crtc for pipe B housing
                 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
                 * and marking underrun reporting as disabled for the non-existing
                 * PCH transcoders B and C would prevent enabling the south
                 * error interrupt (see cpt_can_enable_serr_int()).
                 */
                if (has_pch_trancoder(dev_priv, crtc->pipe))
                        crtc->pch_fifo_underrun_disabled = true;
        }
}
16339
16340 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
16341 {
16342         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
16343
16344         /*
16345          * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
16346          * the hardware when a high res displays plugged in. DPLL P
16347          * divider is zero, and the pipe timings are bonkers. We'll
16348          * try to disable everything in that case.
16349          *
16350          * FIXME would be nice to be able to sanitize this state
16351          * without several WARNs, but for now let's take the easy
16352          * road.
16353          */
16354         return IS_GEN(dev_priv, 6) &&
16355                 crtc_state->base.active &&
16356                 crtc_state->shared_dpll &&
16357                 crtc_state->port_clock == 0;
16358 }
16359
/*
 * Sanitize an encoder's state after hardware readout: if the encoder has
 * active connectors but no active pipe (or a SNB bogus-DPLL pipe), disable
 * it manually and clamp the connector state to off. Finally notify
 * opregion and, on gen11+, sanitize the DDI clock/PLL mapping.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_connector *connector;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_crtc_state *crtc_state = crtc ?
                to_intel_crtc_state(crtc->base.state) : NULL;

        /* We need to check both for a crtc link (meaning that the
         * encoder is active and trying to read from a pipe) and the
         * pipe itself being active. */
        bool has_active_crtc = crtc_state &&
                crtc_state->base.active;

        if (crtc_state && has_bogus_dpll_config(crtc_state)) {
                DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
                              pipe_name(crtc->pipe));
                /* Treat the pipe as inactive so it gets disabled below. */
                has_active_crtc = false;
        }

        connector = intel_encoder_find_connector(encoder);
        if (connector && !has_active_crtc) {
                DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
                              encoder->base.base.id,
                              encoder->base.name);

                /* Connector is active, but has no active pipe. This is
                 * fallout from our resume register restoring. Disable
                 * the encoder manually again. */
                if (crtc_state) {
                        struct drm_encoder *best_encoder;

                        DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
                                      encoder->base.base.id,
                                      encoder->base.name);

                        /* avoid oopsing in case the hooks consult best_encoder */
                        best_encoder = connector->base.state->best_encoder;
                        connector->base.state->best_encoder = &encoder->base;

                        if (encoder->disable)
                                encoder->disable(encoder, crtc_state,
                                                 connector->base.state);
                        if (encoder->post_disable)
                                encoder->post_disable(encoder, crtc_state,
                                                      connector->base.state);

                        /* Restore the original best_encoder we stashed above. */
                        connector->base.state->best_encoder = best_encoder;
                }
                encoder->base.crtc = NULL;

                /* Inconsistent output/port/pipe state happens presumably due to
                 * a bug in one of the get_hw_state functions. Or someplace else
                 * in our code, like the register restore mess on resume. Clamp
                 * things to off as a safer default. */

                connector->base.dpms = DRM_MODE_DPMS_OFF;
                connector->base.encoder = NULL;
        }

        /* notify opregion of the sanitized encoder state */
        intel_opregion_notify_encoder(encoder, connector && has_active_crtc);

        if (INTEL_GEN(dev_priv) >= 11)
                icl_sanitize_encoder_pll_mapping(encoder);
}
16426
16427 void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
16428 {
16429         i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
16430
16431         if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
16432                 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
16433                 i915_disable_vga(dev_priv);
16434         }
16435 }
16436
/*
 * Power-well-safe wrapper around i915_redisable_vga_power_on(): only
 * touches the VGA registers if the VGA power domain is already enabled.
 */
void i915_redisable_vga(struct drm_i915_private *dev_priv)
{
        intel_wakeref_t wakeref;

        /*
         * This function can be called both from intel_modeset_setup_hw_state or
         * at a very early point in our resume sequence, where the power well
         * structures are not yet restored. Since this function is at a very
         * paranoid "someone might have enabled VGA while we were not looking"
         * level, just check if the power well is enabled instead of trying to
         * follow the "don't touch the power well if we don't need it" policy
         * the rest of the driver uses.
         */
        wakeref = intel_display_power_get_if_enabled(dev_priv,
                                                     POWER_DOMAIN_VGA);
        if (!wakeref)
                return;

        i915_redisable_vga_power_on(dev_priv);

        /* Balance the conditional power get above. */
        intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
}
16459
/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
        struct intel_plane *plane;
        struct intel_crtc *crtc;

        /*
         * Read each plane's hw enable state and which pipe it is attached
         * to, and record the visibility in that pipe's crtc state.
         */
        for_each_intel_plane(&dev_priv->drm, plane) {
                struct intel_plane_state *plane_state =
                        to_intel_plane_state(plane->base.state);
                struct intel_crtc_state *crtc_state;
                /* Default pipe in case get_hw_state() reports "disabled". */
                enum pipe pipe = PIPE_A;
                bool visible;

                visible = plane->get_hw_state(plane, &pipe);

                crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
                crtc_state = to_intel_crtc_state(crtc->base.state);

                intel_set_plane_visible(crtc_state, plane_state, visible);

                DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
                              plane->base.base.id, plane->base.name,
                              enableddisabled(visible), pipe_name(pipe));
        }

        /* Reconcile each crtc's active-planes bookkeeping with the readout. */
        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);

                fixup_active_planes(crtc_state);
        }
}
16492
16493 static void intel_modeset_readout_hw_state(struct drm_device *dev)
16494 {
16495         struct drm_i915_private *dev_priv = to_i915(dev);
16496         enum pipe pipe;
16497         struct intel_crtc *crtc;
16498         struct intel_encoder *encoder;
16499         struct intel_connector *connector;
16500         struct drm_connector_list_iter conn_iter;
16501         int i;
16502
16503         dev_priv->active_crtcs = 0;
16504
16505         for_each_intel_crtc(dev, crtc) {
16506                 struct intel_crtc_state *crtc_state =
16507                         to_intel_crtc_state(crtc->base.state);
16508
16509                 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
16510                 memset(crtc_state, 0, sizeof(*crtc_state));
16511                 __drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base);
16512
16513                 crtc_state->base.active = crtc_state->base.enable =
16514                         dev_priv->display.get_pipe_config(crtc, crtc_state);
16515
16516                 crtc->base.enabled = crtc_state->base.enable;
16517                 crtc->active = crtc_state->base.active;
16518
16519                 if (crtc_state->base.active)
16520                         dev_priv->active_crtcs |= 1 << crtc->pipe;
16521
16522                 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
16523                               crtc->base.base.id, crtc->base.name,
16524                               enableddisabled(crtc_state->base.active));
16525         }
16526
16527         readout_plane_state(dev_priv);
16528
16529         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
16530                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
16531
16532                 pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
16533                                                         &pll->state.hw_state);
16534                 pll->state.crtc_mask = 0;
16535                 for_each_intel_crtc(dev, crtc) {
16536                         struct intel_crtc_state *crtc_state =
16537                                 to_intel_crtc_state(crtc->base.state);
16538
16539                         if (crtc_state->base.active &&
16540                             crtc_state->shared_dpll == pll)
16541                                 pll->state.crtc_mask |= 1 << crtc->pipe;
16542                 }
16543                 pll->active_mask = pll->state.crtc_mask;
16544
16545                 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
16546                               pll->info->name, pll->state.crtc_mask, pll->on);
16547         }
16548
16549         for_each_intel_encoder(dev, encoder) {
16550                 pipe = 0;
16551
16552                 if (encoder->get_hw_state(encoder, &pipe)) {
16553                         struct intel_crtc_state *crtc_state;
16554
16555                         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16556                         crtc_state = to_intel_crtc_state(crtc->base.state);
16557
16558                         encoder->base.crtc = &crtc->base;
16559                         encoder->get_config(encoder, crtc_state);
16560                 } else {
16561                         encoder->base.crtc = NULL;
16562                 }
16563
16564                 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
16565                               encoder->base.base.id, encoder->base.name,
16566                               enableddisabled(encoder->base.crtc),
16567                               pipe_name(pipe));
16568         }
16569
16570         drm_connector_list_iter_begin(dev, &conn_iter);
16571         for_each_intel_connector_iter(connector, &conn_iter) {
16572                 if (connector->get_hw_state(connector)) {
16573                         connector->base.dpms = DRM_MODE_DPMS_ON;
16574
16575                         encoder = connector->encoder;
16576                         connector->base.encoder = &encoder->base;
16577
16578                         if (encoder->base.crtc &&
16579                             encoder->base.crtc->state->active) {
16580                                 /*
16581                                  * This has to be done during hardware readout
16582                                  * because anything calling .crtc_disable may
16583                                  * rely on the connector_mask being accurate.
16584                                  */
16585                                 encoder->base.crtc->state->connector_mask |=
16586                                         drm_connector_mask(&connector->base);
16587                                 encoder->base.crtc->state->encoder_mask |=
16588                                         drm_encoder_mask(&encoder->base);
16589                         }
16590
16591                 } else {
16592                         connector->base.dpms = DRM_MODE_DPMS_OFF;
16593                         connector->base.encoder = NULL;
16594                 }
16595                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
16596                               connector->base.base.id, connector->base.name,
16597                               enableddisabled(connector->base.encoder));
16598         }
16599         drm_connector_list_iter_end(&conn_iter);
16600
16601         for_each_intel_crtc(dev, crtc) {
16602                 struct intel_bw_state *bw_state =
16603                         to_intel_bw_state(dev_priv->bw_obj.state);
16604                 struct intel_crtc_state *crtc_state =
16605                         to_intel_crtc_state(crtc->base.state);
16606                 struct intel_plane *plane;
16607                 int min_cdclk = 0;
16608
16609                 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
16610                 if (crtc_state->base.active) {
16611                         intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
16612                         crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
16613                         crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
16614                         intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
16615                         WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
16616
16617                         /*
16618                          * The initial mode needs to be set in order to keep
16619                          * the atomic core happy. It wants a valid mode if the
16620                          * crtc's enabled, so we do the above call.
16621                          *
16622                          * But we don't set all the derived state fully, hence
16623                          * set a flag to indicate that a full recalculation is
16624                          * needed on the next commit.
16625                          */
16626                         crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
16627
16628                         intel_crtc_compute_pixel_rate(crtc_state);
16629
16630                         if (dev_priv->display.modeset_calc_cdclk) {
16631                                 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
16632                                 if (WARN_ON(min_cdclk < 0))
16633                                         min_cdclk = 0;
16634                         }
16635
16636                         drm_calc_timestamping_constants(&crtc->base,
16637                                                         &crtc_state->base.adjusted_mode);
16638                         update_scanline_offset(crtc_state);
16639                 }
16640
16641                 dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
16642                 dev_priv->min_voltage_level[crtc->pipe] =
16643                         crtc_state->min_voltage_level;
16644
16645                 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
16646                         const struct intel_plane_state *plane_state =
16647                                 to_intel_plane_state(plane->base.state);
16648
16649                         /*
16650                          * FIXME don't have the fb yet, so can't
16651                          * use intel_plane_data_rate() :(
16652                          */
16653                         if (plane_state->base.visible)
16654                                 crtc_state->data_rate[plane->id] =
16655                                         4 * crtc_state->pixel_rate;
16656                 }
16657
16658                 intel_bw_crtc_update(bw_state, crtc_state);
16659
16660                 intel_pipe_config_sanity_check(dev_priv, crtc_state);
16661         }
16662 }
16663
16664 static void
16665 get_encoder_power_domains(struct drm_i915_private *dev_priv)
16666 {
16667         struct intel_encoder *encoder;
16668
16669         for_each_intel_encoder(&dev_priv->drm, encoder) {
16670                 struct intel_crtc_state *crtc_state;
16671
16672                 if (!encoder->get_power_domains)
16673                         continue;
16674
16675                 /*
16676                  * MST-primary and inactive encoders don't have a crtc state
16677                  * and neither of these require any power domain references.
16678                  */
16679                 if (!encoder->base.crtc)
16680                         continue;
16681
16682                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
16683                 encoder->get_power_domains(encoder, crtc_state);
16684         }
16685 }
16686
16687 static void intel_early_display_was(struct drm_i915_private *dev_priv)
16688 {
16689         /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
16690         if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
16691                 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
16692                            DARBF_GATING_DIS);
16693
16694         if (IS_HASWELL(dev_priv)) {
16695                 /*
16696                  * WaRsPkgCStateDisplayPMReq:hsw
16697                  * System hang if this isn't done before disabling all planes!
16698                  */
16699                 I915_WRITE(CHICKEN_PAR1_1,
16700                            I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
16701         }
16702 }
16703
16704 static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
16705                                        enum port port, i915_reg_t hdmi_reg)
16706 {
16707         u32 val = I915_READ(hdmi_reg);
16708
16709         if (val & SDVO_ENABLE ||
16710             (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
16711                 return;
16712
16713         DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
16714                       port_name(port));
16715
16716         val &= ~SDVO_PIPE_SEL_MASK;
16717         val |= SDVO_PIPE_SEL(PIPE_A);
16718
16719         I915_WRITE(hdmi_reg, val);
16720 }
16721
16722 static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
16723                                      enum port port, i915_reg_t dp_reg)
16724 {
16725         u32 val = I915_READ(dp_reg);
16726
16727         if (val & DP_PORT_EN ||
16728             (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
16729                 return;
16730
16731         DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
16732                       port_name(port));
16733
16734         val &= ~DP_PIPE_SEL_MASK;
16735         val |= DP_PIPE_SEL(PIPE_A);
16736
16737         I915_WRITE(dp_reg, val);
16738 }
16739
16740 static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
16741 {
16742         /*
16743          * The BIOS may select transcoder B on some of the PCH
16744          * ports even it doesn't enable the port. This would trip
16745          * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
16746          * Sanitize the transcoder select bits to prevent that. We
16747          * assume that the BIOS never actually enabled the port,
16748          * because if it did we'd actually have to toggle the port
16749          * on and back off to make the transcoder A select stick
16750          * (see. intel_dp_link_down(), intel_disable_hdmi(),
16751          * intel_disable_sdvo()).
16752          */
16753         ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
16754         ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
16755         ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
16756
16757         /* PCH SDVOB multiplex with HDMIB */
16758         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
16759         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
16760         ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
16761 }
16762
/*
 * Scan out the current hw modeset state, and sanitize it to the current
 * state (boot/resume path). Readout happens first, then the individual
 * sanitize steps run in a carefully chosen order — see comments inline.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
                             struct drm_modeset_acquire_ctx *ctx)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *crtc_state;
        struct intel_encoder *encoder;
        struct intel_crtc *crtc;
        intel_wakeref_t wakeref;
        int i;

        /* Everything below pokes display hardware; hold a wakeref throughout. */
        wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

        /* Apply early workarounds before touching planes/pipes. */
        intel_early_display_was(dev_priv);
        intel_modeset_readout_hw_state(dev);

        /* HW state is read out, now we need to sanitize this mess. */
        get_encoder_power_domains(dev_priv);

        if (HAS_PCH_IBX(dev_priv))
                ibx_sanitize_pch_ports(dev_priv);

        /*
         * intel_sanitize_plane_mapping() may need to do vblank
         * waits, so we need vblank interrupts restored beforehand.
         */
        for_each_intel_crtc(&dev_priv->drm, crtc) {
                crtc_state = to_intel_crtc_state(crtc->base.state);

                drm_crtc_vblank_reset(&crtc->base);

                if (crtc_state->base.active)
                        intel_crtc_vblank_on(crtc_state);
        }

        intel_sanitize_plane_mapping(dev_priv);

        for_each_intel_encoder(dev, encoder)
                intel_sanitize_encoder(encoder);

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                crtc_state = to_intel_crtc_state(crtc->base.state);
                intel_sanitize_crtc(crtc, ctx);
                intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
        }

        intel_modeset_update_connector_atomic_state(dev);

        /* Turn off any shared DPLL left running but not used by any crtc. */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                if (!pll->on || pll->active_mask)
                        continue;

                DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
                              pll->info->name);

                pll->info->funcs->disable(dev_priv, pll);
                pll->on = false;
        }

        /* Read out (and where supported, sanitize) watermark hw state. */
        if (IS_G4X(dev_priv)) {
                g4x_wm_get_hw_state(dev_priv);
                g4x_wm_sanitize(dev_priv);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                vlv_wm_get_hw_state(dev_priv);
                vlv_wm_sanitize(dev_priv);
        } else if (INTEL_GEN(dev_priv) >= 9) {
                skl_wm_get_hw_state(dev_priv);
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                ilk_wm_get_hw_state(dev_priv);
        }

        /*
         * Acquire each crtc's power domains; the WARN_ON fires if any
         * domains come back to be put, which would indicate a readout
         * or accounting inconsistency.
         */
        for_each_intel_crtc(dev, crtc) {
                u64 put_domains;

                crtc_state = to_intel_crtc_state(crtc->base.state);
                put_domains = modeset_get_crtc_power_domains(crtc, crtc_state);
                if (WARN_ON(put_domains))
                        modeset_put_power_domains(dev_priv, put_domains);
        }

        intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

        intel_fbc_init_pipe_state(dev_priv);
}
16852
16853 void intel_display_resume(struct drm_device *dev)
16854 {
16855         struct drm_i915_private *dev_priv = to_i915(dev);
16856         struct drm_atomic_state *state = dev_priv->modeset_restore_state;
16857         struct drm_modeset_acquire_ctx ctx;
16858         int ret;
16859
16860         dev_priv->modeset_restore_state = NULL;
16861         if (state)
16862                 state->acquire_ctx = &ctx;
16863
16864         drm_modeset_acquire_init(&ctx, 0);
16865
16866         while (1) {
16867                 ret = drm_modeset_lock_all_ctx(dev, &ctx);
16868                 if (ret != -EDEADLK)
16869                         break;
16870
16871                 drm_modeset_backoff(&ctx);
16872         }
16873
16874         if (!ret)
16875                 ret = __intel_display_resume(dev, state, &ctx);
16876
16877         intel_enable_ipc(dev_priv);
16878         drm_modeset_drop_locks(&ctx);
16879         drm_modeset_acquire_fini(&ctx);
16880
16881         if (ret)
16882                 DRM_ERROR("Restoring old state failed with %i\n", ret);
16883         if (state)
16884                 drm_atomic_state_put(state);
16885 }
16886
16887 static void intel_hpd_poll_fini(struct drm_device *dev)
16888 {
16889         struct intel_connector *connector;
16890         struct drm_connector_list_iter conn_iter;
16891
16892         /* Kill all the work that may have been queued by hpd. */
16893         drm_connector_list_iter_begin(dev, &conn_iter);
16894         for_each_intel_connector_iter(connector, &conn_iter) {
16895                 if (connector->modeset_retry_work.func)
16896                         cancel_work_sync(&connector->modeset_retry_work);
16897                 if (connector->hdcp.shim) {
16898                         cancel_delayed_work_sync(&connector->hdcp.check_work);
16899                         cancel_work_sync(&connector->hdcp.prop_work);
16900                 }
16901         }
16902         drm_connector_list_iter_end(&conn_iter);
16903 }
16904
/*
 * Tear down all modeset/display state. The ordering below is load-bearing
 * (see inline comments): interrupts and polling first, fbdev only after
 * polling is gone, and the modeset workqueue last among the work-related
 * teardown steps.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        /* Drain any modeset work still in flight before tearing down. */
        flush_workqueue(dev_priv->modeset_wq);

        flush_work(&dev_priv->atomic_helper.free_work);
        WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));

        /*
         * Interrupts and polling as the first thing to avoid creating havoc.
         * Too much stuff here (turning of connectors, ...) would
         * experience fancy races otherwise.
         */
        intel_irq_uninstall(dev_priv);

        /*
         * Due to the hpd irq storm handling the hotplug work can re-arm the
         * poll handlers. Hence disable polling after hpd handling is shut down.
         */
        intel_hpd_poll_fini(dev);

        /* poll work can call into fbdev, hence clean that up afterwards */
        intel_fbdev_fini(dev_priv);

        intel_unregister_dsm_handler();

        intel_fbc_global_disable(dev_priv);

        /* flush any delayed tasks or pending work */
        flush_scheduled_work();

        intel_hdcp_component_fini(dev_priv);

        drm_mode_config_cleanup(dev);

        intel_overlay_cleanup(dev_priv);

        intel_gmbus_teardown(dev_priv);

        destroy_workqueue(dev_priv->modeset_wq);

        intel_fbc_cleanup_cfb(dev_priv);
}
16949
16950 /*
16951  * set vga decode state - true == enable VGA decode
16952  */
16953 int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
16954 {
16955         unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
16956         u16 gmch_ctrl;
16957
16958         if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16959                 DRM_ERROR("failed to read control word\n");
16960                 return -EIO;
16961         }
16962
16963         if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16964                 return 0;
16965
16966         if (state)
16967                 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16968         else
16969                 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16970
16971         if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16972                 DRM_ERROR("failed to write control word\n");
16973                 return -EIO;
16974         }
16975
16976         return 0;
16977 }
16978
16979 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
16980
/*
 * Snapshot of key display registers captured at error time by
 * intel_display_capture_error_state() and pretty-printed by
 * intel_display_print_error_state(). Register names in the comments
 * refer to the registers read during capture below.
 */
struct intel_display_error_state {

        u32 power_well_driver;          /* HSW_PWR_WELL_CTL2 (HSW/BDW only) */

        struct intel_cursor_error_state {
                u32 control;            /* CURCNTR */
                u32 position;           /* CURPOS */
                u32 base;               /* CURBASE */
                u32 size;               /* NOTE(review): never captured or printed */
        } cursor[I915_MAX_PIPES];

        struct intel_pipe_error_state {
                bool power_domain_on;   /* pipe power domain was up at capture time */
                u32 source;             /* PIPESRC */
                u32 stat;               /* PIPESTAT (GMCH platforms only) */
        } pipe[I915_MAX_PIPES];

        struct intel_plane_error_state {
                u32 control;            /* DSPCNTR */
                u32 stride;             /* DSPSTRIDE */
                u32 size;               /* DSPSIZE (gen <= 3 only) */
                u32 pos;                /* DSPPOS (gen <= 3 only) */
                u32 addr;               /* DSPADDR (gen <= 7, not HSW) */
                u32 surface;            /* DSPSURF (gen >= 4) */
                u32 tile_offset;        /* DSPTILEOFF (gen >= 4) */
        } plane[I915_MAX_PIPES];

        struct intel_transcoder_error_state {
                bool available;         /* transcoder exists on this platform */
                bool power_domain_on;   /* transcoder power domain was up at capture */
                enum transcoder cpu_transcoder;

                u32 conf;               /* PIPECONF */

                /* Timing registers: HTOTAL/HBLANK/HSYNC/VTOTAL/VBLANK/VSYNC */
                u32 htotal;
                u32 hblank;
                u32 hsync;
                u32 vtotal;
                u32 vblank;
                u32 vsync;
        } transcoder[4];
};
17023
/*
 * Capture a snapshot of display register state for inclusion in the GPU
 * error state. Returns NULL when there is no display or when allocation
 * fails; the caller owns (and must free) the returned struct.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
        struct intel_display_error_state *error;
        int transcoders[] = {
                TRANSCODER_A,
                TRANSCODER_B,
                TRANSCODER_C,
                TRANSCODER_EDP,
        };
        int i;

        BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));

        if (!HAS_DISPLAY(dev_priv))
                return NULL;

        /* GFP_ATOMIC: error capture may run from a context that cannot sleep. */
        error = kzalloc(sizeof(*error), GFP_ATOMIC);
        if (error == NULL)
                return NULL;

        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);

        for_each_pipe(dev_priv, i) {
                /* Skip register reads for pipes whose power domain is down. */
                error->pipe[i].power_domain_on =
                        __intel_display_power_is_enabled(dev_priv,
                                                         POWER_DOMAIN_PIPE(i));
                if (!error->pipe[i].power_domain_on)
                        continue;

                error->cursor[i].control = I915_READ(CURCNTR(i));
                error->cursor[i].position = I915_READ(CURPOS(i));
                error->cursor[i].base = I915_READ(CURBASE(i));

                error->plane[i].control = I915_READ(DSPCNTR(i));
                error->plane[i].stride = I915_READ(DSPSTRIDE(i));
                /* Gen-dependent plane registers; mirrors the print side. */
                if (INTEL_GEN(dev_priv) <= 3) {
                        error->plane[i].size = I915_READ(DSPSIZE(i));
                        error->plane[i].pos = I915_READ(DSPPOS(i));
                }
                if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
                        error->plane[i].addr = I915_READ(DSPADDR(i));
                if (INTEL_GEN(dev_priv) >= 4) {
                        error->plane[i].surface = I915_READ(DSPSURF(i));
                        error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
                }

                error->pipe[i].source = I915_READ(PIPESRC(i));

                if (HAS_GMCH(dev_priv))
                        error->pipe[i].stat = I915_READ(PIPESTAT(i));
        }

        for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
                enum transcoder cpu_transcoder = transcoders[i];

                /* Transcoder not present on this platform (no MMIO offset). */
                if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
                        continue;

                error->transcoder[i].available = true;
                error->transcoder[i].power_domain_on =
                        __intel_display_power_is_enabled(dev_priv,
                                POWER_DOMAIN_TRANSCODER(cpu_transcoder));
                if (!error->transcoder[i].power_domain_on)
                        continue;

                error->transcoder[i].cpu_transcoder = cpu_transcoder;

                error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
                error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
                error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
                error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
                error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
                error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
                error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
        }

        return error;
}
17104
/* Shorthand for printing into the error-state capture buffer @e. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
17106
/*
 * Pretty-print a previously captured display error state snapshot into
 * the error state buffer @m. A NULL @error (nothing captured) is a no-op.
 * The gen-gated sections mirror the capture-side register reads.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
                                struct intel_display_error_state *error)
{
        struct drm_i915_private *dev_priv = m->i915;
        int i;

        if (!error)
                return;

        err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                err_printf(m, "PWR_WELL_CTL2: %08x\n",
                           error->power_well_driver);
        for_each_pipe(dev_priv, i) {
                err_printf(m, "Pipe [%d]:\n", i);
                err_printf(m, "  Power: %s\n",
                           onoff(error->pipe[i].power_domain_on));
                err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
                err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

                err_printf(m, "Plane [%d]:\n", i);
                err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
                err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
                if (INTEL_GEN(dev_priv) <= 3) {
                        err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
                        err_printf(m, "  POS: %08x\n", error->plane[i].pos);
                }
                if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
                        err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
                if (INTEL_GEN(dev_priv) >= 4) {
                        err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
                        err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
                }

                err_printf(m, "Cursor [%d]:\n", i);
                err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
                err_printf(m, "  POS: %08x\n", error->cursor[i].position);
                err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
        }

        /* Only transcoders marked available at capture time are printed. */
        for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
                if (!error->transcoder[i].available)
                        continue;

                err_printf(m, "CPU transcoder: %s\n",
                           transcoder_name(error->transcoder[i].cpu_transcoder));
                err_printf(m, "  Power: %s\n",
                           onoff(error->transcoder[i].power_domain_on));
                err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
                err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
                err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
                err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
                err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
                err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
                err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
        }
}
17165
17166 #endif