drivers/gpu/drm/i915/display/intel_dp.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/byteorder.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"

#define DP_DPRX_ESI_LEN 14

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE                  2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0             340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1             400000

/* DP DSC FEC Overhead factor = 1/(0.972261) */
#define DP_DSC_FEC_OVERHEAD_FACTOR              972261

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK  0
#define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
        int clock;
        struct dpll dpll;
};

static const struct dp_link_dpll g4x_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
        { 162000,
                { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
        { 270000,
                { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below provides only the fixed rates, excluding the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
        /*
         * CHV requires programming a fractional divider for m2.
         * m2 is stored in fixed point format using the formula below:
         * (m2_int << 22) | m2_fraction
         */
        { 162000,       /* m2_int = 32, m2_fraction = 1677722 */
                { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
        { 270000,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};

/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With a single pipe configuration, the HW is capable of supporting a
 * maximum of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};

/**
 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * Returns true if a CPU or PCH DP output is attached to an eDP panel,
 * false otherwise.
 */
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

        return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static void intel_dp_link_down(struct intel_encoder *encoder,
                               const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
                                           const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
                                      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
        static const int dp_rates[] = {
                162000, 270000, 540000, 810000
        };
        int i, max_rate;

        max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

        for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
                if (dp_rates[i] > max_rate)
                        break;
                intel_dp->sink_rates[i] = dp_rates[i];
        }

        intel_dp->num_sink_rates = i;
}
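
/*
 * Illustration (not from the code above, just a worked example): a sink
 * reporting DP_MAX_LINK_RATE = 0x14 (HBR2) yields max_rate = 540000, so
 * sink_rates becomes {162000, 270000, 540000} and num_sink_rates = 3.
 */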

/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
        int i;

        /* Limit results by potentially reduced max rate */
        for (i = 0; i < len; i++) {
                if (rates[len - i - 1] <= max_rate)
                        return len - i;
        }

        return 0;
}
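
/*
 * Illustration: with rates = {162000, 270000, 540000} and
 * max_rate = 270000, the walk from the top finds 270000 <= 270000 at
 * i = 1 and returns len - i = 2.
 */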

/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
                                          int max_rate)
{
        return intel_dp_rate_limit_len(intel_dp->common_rates,
                                       intel_dp->num_common_rates, max_rate);
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
        return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}

/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        int source_max = intel_dig_port->max_lanes;
        int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
        int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port);

        return min3(source_max, sink_max, fia_max);
}

int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
        return intel_dp->max_link_lane_count;
}

int
intel_dp_link_required(int pixel_clock, int bpp)
{
        /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
        return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
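
/*
 * Illustration: a 1920x1080@60 mode (pixel clock 148500 kHz) at 24 bpp
 * requires DIV_ROUND_UP(148500 * 24, 8) = 445500 kBytes/sec of link
 * bandwidth.
 */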

int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
        /* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
         * link rate that is generally expressed in Gbps. Since 8 bits of data
         * are transmitted every LS_Clk per lane, there is no need to account
         * for the channel encoding that is done in the PHY layer here.
         */

        return max_link_clock * max_lanes;
}
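
/*
 * Illustration: an HBR2 link (LS_Clk 540000 kHz) over 4 lanes can carry
 * 540000 * 4 = 2160000 kBytes/sec, the value that the output of
 * intel_dp_link_required() is compared against.
 */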

static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        int max_dotclk = dev_priv->max_dotclk_freq;
        int ds_max_dotclk;

        int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

        if (type != DP_DS_PORT_TYPE_VGA)
                return max_dotclk;

        ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
                                                    intel_dp->downstream_ports);

        if (ds_max_dotclk != 0)
                max_dotclk = min(max_dotclk, ds_max_dotclk);

        return max_dotclk;
}

static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        enum port port = dig_port->base.port;

        u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

        /* Low voltage SKUs are limited to max of 5.4G */
        if (voltage == VOLTAGE_INFO_0_85V)
                return 540000;

        /* For this SKU 8.1G is supported on all ports */
        if (IS_CNL_WITH_PORT_F(dev_priv))
                return 810000;

        /* For other SKUs, max rate on ports A and D is 5.4G */
        if (port == PORT_A || port == PORT_D)
                return 540000;

        return 810000;
}

static int icl_max_source_rate(struct intel_dp *intel_dp)
{
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

        if (intel_phy_is_combo(dev_priv, phy) &&
            !IS_ELKHARTLAKE(dev_priv) &&
            !intel_dp_is_edp(intel_dp))
                return 540000;

        return 810000;
}

static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
        /* The values must be in increasing order */
        static const int cnl_rates[] = {
                162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
        };
        static const int bxt_rates[] = {
                162000, 216000, 243000, 270000, 324000, 432000, 540000
        };
        static const int skl_rates[] = {
                162000, 216000, 270000, 324000, 432000, 540000
        };
        static const int hsw_rates[] = {
                162000, 270000, 540000
        };
        static const int g4x_rates[] = {
                162000, 270000
        };
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &dig_port->base;
        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        const int *source_rates;
        int size, max_rate = 0, vbt_max_rate;

        /* This should only be done once */
        drm_WARN_ON(&dev_priv->drm,
                    intel_dp->source_rates || intel_dp->num_source_rates);

        if (INTEL_GEN(dev_priv) >= 10) {
                source_rates = cnl_rates;
                size = ARRAY_SIZE(cnl_rates);
                if (IS_GEN(dev_priv, 10))
                        max_rate = cnl_max_source_rate(intel_dp);
                else
                        max_rate = icl_max_source_rate(intel_dp);
        } else if (IS_GEN9_LP(dev_priv)) {
                source_rates = bxt_rates;
                size = ARRAY_SIZE(bxt_rates);
        } else if (IS_GEN9_BC(dev_priv)) {
                source_rates = skl_rates;
                size = ARRAY_SIZE(skl_rates);
        } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
                   IS_BROADWELL(dev_priv)) {
                source_rates = hsw_rates;
                size = ARRAY_SIZE(hsw_rates);
        } else {
                source_rates = g4x_rates;
                size = ARRAY_SIZE(g4x_rates);
        }

        vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
        if (max_rate && vbt_max_rate)
                max_rate = min(max_rate, vbt_max_rate);
        else if (vbt_max_rate)
                max_rate = vbt_max_rate;

        if (max_rate)
                size = intel_dp_rate_limit_len(source_rates, size, max_rate);

        intel_dp->source_rates = source_rates;
        intel_dp->num_source_rates = size;
}

static int intersect_rates(const int *source_rates, int source_len,
                           const int *sink_rates, int sink_len,
                           int *common_rates)
{
        int i = 0, j = 0, k = 0;

        while (i < source_len && j < sink_len) {
                if (source_rates[i] == sink_rates[j]) {
                        if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
                                return k;
                        common_rates[k] = source_rates[i];
                        ++k;
                        ++i;
                        ++j;
                } else if (source_rates[i] < sink_rates[j]) {
                        ++i;
                } else {
                        ++j;
                }
        }
        return k;
}
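
/*
 * Illustration: source {162000, 270000, 540000} intersected with sink
 * {162000, 270000, 540000, 810000} leaves common_rates =
 * {162000, 270000, 540000} and returns k = 3. Both inputs must be
 * sorted in increasing order for this two-pointer walk to work.
 */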

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
        int i;

        for (i = 0; i < len; i++)
                if (rate == rates[i])
                        return i;

        return -1;
}

static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
        WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);

        intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
                                                     intel_dp->num_source_rates,
                                                     intel_dp->sink_rates,
                                                     intel_dp->num_sink_rates,
                                                     intel_dp->common_rates);

        /* Paranoia, there should always be something in common. */
        if (WARN_ON(intel_dp->num_common_rates == 0)) {
                intel_dp->common_rates[0] = 162000;
                intel_dp->num_common_rates = 1;
        }
}

static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
                                       u8 lane_count)
{
        /*
         * FIXME: we need to synchronize the current link parameters with
         * hardware readout. Currently fast link training doesn't work on
         * boot-up.
         */
        if (link_rate == 0 ||
            link_rate > intel_dp->max_link_rate)
                return false;

        if (lane_count == 0 ||
            lane_count > intel_dp_max_lane_count(intel_dp))
                return false;

        return true;
}

static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
                                                     int link_rate,
                                                     u8 lane_count)
{
        const struct drm_display_mode *fixed_mode =
                intel_dp->attached_connector->panel.fixed_mode;
        int mode_rate, max_rate;

        mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
        max_rate = intel_dp_max_data_rate(link_rate, lane_count);
        if (mode_rate > max_rate)
                return false;

        return true;
}

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
                                            int link_rate, u8 lane_count)
{
        int index;

        index = intel_dp_rate_index(intel_dp->common_rates,
                                    intel_dp->num_common_rates,
                                    link_rate);
        if (index > 0) {
                if (intel_dp_is_edp(intel_dp) &&
                    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
                                                              intel_dp->common_rates[index - 1],
                                                              lane_count)) {
                        DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
                        return 0;
                }
                intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
                intel_dp->max_link_lane_count = lane_count;
        } else if (lane_count > 1) {
                if (intel_dp_is_edp(intel_dp) &&
                    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
                                                              intel_dp_max_common_rate(intel_dp),
                                                              lane_count >> 1)) {
                        DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
                        return 0;
                }
                intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
                intel_dp->max_link_lane_count = lane_count >> 1;
        } else {
                DRM_ERROR("Link Training Unsuccessful\n");
                return -1;
        }

        return 0;
}
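
/*
 * Illustration of the resulting fallback order with common rates
 * {162000, 270000, 540000}: (540000, 4) -> (270000, 4) -> (162000, 4)
 * -> (540000, 2) -> ... -> (162000, 1), i.e. the rate is lowered
 * first, then the lane count is halved at the maximum rate, until -1
 * is returned and link training is given up.
 */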

u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
        return div_u64(mul_u32_u32(mode_clock, 1000000U),
                       DP_DSC_FEC_OVERHEAD_FACTOR);
}
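
/*
 * Illustration: DP_DSC_FEC_OVERHEAD_FACTOR encodes 1/0.972261, i.e.
 * the roughly 2.8% of link bandwidth consumed by FEC. For
 * mode_clock = 148500 kHz this returns
 * 148500 * 1000000 / 972261 = 152736 kHz.
 */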

static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
        if (INTEL_GEN(i915) >= 11)
                return 7680 * 8;
        else
                return 6144 * 8;
}

static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
                                       u32 link_clock, u32 lane_count,
                                       u32 mode_clock, u32 mode_hdisplay)
{
        u32 bits_per_pixel, max_bpp_small_joiner_ram;
        int i;

        /*
         * Available Link Bandwidth (Kbits/sec) = (NumberOfLanes) *
         * (LinkSymbolClock) * 8 * (TimeSlotsPerMTP);
         * for SST, TimeSlotsPerMTP is 1,
         * for MST, TimeSlotsPerMTP has to be calculated.
         */
        bits_per_pixel = (link_clock * lane_count * 8) /
                         intel_dp_mode_to_fec_clock(mode_clock);
        drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);

        /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
        max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
                mode_hdisplay;
        drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
                    max_bpp_small_joiner_ram);

        /*
         * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
         * check, output bpp from small joiner RAM check)
         */
        bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

        /* Error out if the max bpp is less than smallest allowed valid bpp */
        if (bits_per_pixel < valid_dsc_bpp[0]) {
                drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
                            bits_per_pixel, valid_dsc_bpp[0]);
                return 0;
        }

        /* Find the nearest match in the array of known BPPs from VESA */
        for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
                if (bits_per_pixel < valid_dsc_bpp[i + 1])
                        break;
        }
        bits_per_pixel = valid_dsc_bpp[i];

        /*
         * Compressed BPP is in U6.4 format, so multiply by 16. For Gen 11
         * the fractional part is 0.
         */
        return bits_per_pixel << 4;
}
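
/*
 * Worked example (illustrative numbers): a 3840-wide 4k60 mode
 * (mode_clock 594000 kHz) on an HBR2 x4 link gives a link-bandwidth
 * bpp of (540000 * 4 * 8) / 610947 = 28 and a gen11 small joiner
 * limit of 61440 / 3840 = 16; min(28, 16) = 16 snaps down to the
 * valid VESA bpp 15, and the function returns 15 << 4 = 240.
 */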

static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
                                       int mode_clock, int mode_hdisplay)
{
        u8 min_slice_count, i;
        int max_slice_width;

        if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
                min_slice_count = DIV_ROUND_UP(mode_clock,
                                               DP_DSC_MAX_ENC_THROUGHPUT_0);
        else
                min_slice_count = DIV_ROUND_UP(mode_clock,
                                               DP_DSC_MAX_ENC_THROUGHPUT_1);

        max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
        if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
                DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
                              max_slice_width);
                return 0;
        }
        /* Also take into account max slice width */
        min_slice_count = min_t(u8, min_slice_count,
                                DIV_ROUND_UP(mode_hdisplay,
                                             max_slice_width));

        /* Find the closest match to the valid slice count values */
        for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
                if (valid_dsc_slicecount[i] >
                    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
                                                    false))
                        break;
                if (min_slice_count <= valid_dsc_slicecount[i])
                        return valid_dsc_slicecount[i];
        }

        DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
        return 0;
}
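
/*
 * Illustration: for the 4k60 example above (mode_clock 594000 kHz,
 * below DP_DSC_PEAK_PIXEL_RATE), min_slice_count =
 * DIV_ROUND_UP(594000, 340000) = 2, so 2 slices are picked provided
 * the sink supports at least that many and a wide enough slice.
 */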

static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
                                  int hdisplay)
{
        /*
         * Older platforms don't like hdisplay==4096 with DP.
         *
         * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
         * and frame counter increment), but we don't get vblank interrupts,
         * and the pipe underruns immediately. The link also doesn't seem
         * to get trained properly.
         *
         * On CHV the vblank interrupts don't seem to disappear but
         * otherwise the symptoms are similar.
         *
         * TODO: confirm the behaviour on HSW+
         */
        return hdisplay == 4096 && !HAS_DDI(dev_priv);
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
{
        struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
        int target_clock = mode->clock;
        int max_rate, mode_rate, max_lanes, max_link_clock;
        int max_dotclk;
        u16 dsc_max_output_bpp = 0;
        u8 dsc_slice_count = 0;

        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return MODE_NO_DBLESCAN;

        max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

        if (intel_dp_is_edp(intel_dp) && fixed_mode) {
                if (mode->hdisplay > fixed_mode->hdisplay)
                        return MODE_PANEL;

                if (mode->vdisplay > fixed_mode->vdisplay)
                        return MODE_PANEL;

                target_clock = fixed_mode->clock;
        }

        max_link_clock = intel_dp_max_link_rate(intel_dp);
        max_lanes = intel_dp_max_lane_count(intel_dp);

        max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
        mode_rate = intel_dp_link_required(target_clock, 18);

        if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
                return MODE_H_ILLEGAL;

        /*
         * Output bpp is stored in U6.4 format, so right shift by 4 to get
         * the integer value, since we support only integer values of bpp.
         */
        if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
            drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
                if (intel_dp_is_edp(intel_dp)) {
                        dsc_max_output_bpp =
                                drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
                        dsc_slice_count =
                                drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
                                                                true);
                } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
                        dsc_max_output_bpp =
                                intel_dp_dsc_get_output_bpp(dev_priv,
                                                            max_link_clock,
                                                            max_lanes,
                                                            target_clock,
                                                            mode->hdisplay) >> 4;
                        dsc_slice_count =
                                intel_dp_dsc_get_slice_count(intel_dp,
                                                             target_clock,
                                                             mode->hdisplay);
                }
        }

        if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
            target_clock > max_dotclk)
                return MODE_CLOCK_HIGH;

        if (mode->clock < 10000)
                return MODE_CLOCK_LOW;

        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
                return MODE_H_ILLEGAL;

        return intel_mode_valid_max_plane_size(dev_priv, mode);
}

u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
        int i;
        u32 v = 0;

        if (src_bytes > 4)
                src_bytes = 4;
        for (i = 0; i < src_bytes; i++)
                v |= ((u32)src[i]) << ((3 - i) * 8);
        return v;
}
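
/*
 * Illustration: packing {0x11, 0x22, 0x33} yields 0x11223300, i.e. the
 * first byte lands in the most significant position of the 32-bit AUX
 * data register.
 */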

static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
        int i;

        if (dst_bytes > 4)
                dst_bytes = 4;
        for (i = 0; i < dst_bytes; i++)
                dst[i] = src >> ((3 - i) * 8);
}

static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
                                              bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);

static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        intel_wakeref_t wakeref;

        /*
         * See intel_power_sequencer_reset() for why we need
         * a power domain reference here.
         */
        wakeref = intel_display_power_get(dev_priv,
                                          intel_aux_power_domain(dp_to_dig_port(intel_dp)));

        mutex_lock(&dev_priv->pps_mutex);

        return wakeref;
}

static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        mutex_unlock(&dev_priv->pps_mutex);
        intel_display_power_put(dev_priv,
                                intel_aux_power_domain(dp_to_dig_port(intel_dp)),
                                wakeref);
        return 0;
}

#define with_pps_lock(dp, wf) \
        for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
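
/*
 * The for-loop above is a scoping trick: the body runs exactly once,
 * because pps_lock() returns the acquired wakeref (non-zero while the
 * power reference is held) and pps_unlock() always returns 0, which
 * terminates the loop after a single iteration.
 */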

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum pipe pipe = intel_dp->pps_pipe;
        bool pll_enabled, release_cl_override = false;
        enum dpio_phy phy = DPIO_PHY(pipe);
        enum dpio_channel ch = vlv_pipe_to_channel(pipe);
        u32 DP;

        if (drm_WARN(&dev_priv->drm,
                     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
                     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
                     pipe_name(pipe), intel_dig_port->base.base.base.id,
                     intel_dig_port->base.base.name))
                return;

        drm_dbg_kms(&dev_priv->drm,
                    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
                    pipe_name(pipe), intel_dig_port->base.base.base.id,
                    intel_dig_port->base.base.name);

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
        DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        DP |= DP_PORT_WIDTH(1);
        DP |= DP_LINK_TRAIN_PAT_1;

        if (IS_CHERRYVIEW(dev_priv))
                DP |= DP_PIPE_SEL_CHV(pipe);
        else
                DP |= DP_PIPE_SEL(pipe);

        pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;

        /*
         * The DPLL for the pipe must be enabled for this to work.
         * So enable it temporarily if it's not already enabled.
         */
        if (!pll_enabled) {
                release_cl_override = IS_CHERRYVIEW(dev_priv) &&
                        !chv_phy_powergate_ch(dev_priv, phy, ch, true);

                if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
                                     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
                        drm_err(&dev_priv->drm,
                                "Failed to force on pll for pipe %c!\n",
                                pipe_name(pipe));
                        return;
                }
        }

        /*
         * Similar magic as in intel_dp_enable_port().
         * We _must_ do this port enable + disable trick
         * to make this power sequencer lock onto the port.
         * Otherwise even the VDD force bit won't work.
         */
        intel_de_write(dev_priv, intel_dp->output_reg, DP);
        intel_de_posting_read(dev_priv, intel_dp->output_reg);

        intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
        intel_de_posting_read(dev_priv, intel_dp->output_reg);

        intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
        intel_de_posting_read(dev_priv, intel_dp->output_reg);

        if (!pll_enabled) {
                vlv_force_pll_off(dev_priv, pipe);

                if (release_cl_override)
                        chv_phy_powergate_ch(dev_priv, phy, ch, false);
        }
}

static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

        /*
         * We don't have a power sequencer for this port yet.
         * Pick one that's not used by another port.
         */
        for_each_intel_dp(&dev_priv->drm, encoder) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

                if (encoder->type == INTEL_OUTPUT_EDP) {
                        drm_WARN_ON(&dev_priv->drm,
                                    intel_dp->active_pipe != INVALID_PIPE &&
                                    intel_dp->active_pipe !=
                                    intel_dp->pps_pipe);

                        if (intel_dp->pps_pipe != INVALID_PIPE)
                                pipes &= ~(1 << intel_dp->pps_pipe);
                } else {
                        drm_WARN_ON(&dev_priv->drm,
                                    intel_dp->pps_pipe != INVALID_PIPE);

                        if (intel_dp->active_pipe != INVALID_PIPE)
                                pipes &= ~(1 << intel_dp->active_pipe);
                }
        }

        if (pipes == 0)
                return INVALID_PIPE;

        return ffs(pipes) - 1;
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum pipe pipe;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

        drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
                    intel_dp->active_pipe != intel_dp->pps_pipe);

        if (intel_dp->pps_pipe != INVALID_PIPE)
                return intel_dp->pps_pipe;

        pipe = vlv_find_free_pps(dev_priv);

        /*
         * Didn't find one. This should not happen since there
         * are two power sequencers and up to two eDP ports.
         */
        if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
                pipe = PIPE_A;

        vlv_steal_power_sequencer(dev_priv, pipe);
        intel_dp->pps_pipe = pipe;

        drm_dbg_kms(&dev_priv->drm,
                    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
                    pipe_name(intel_dp->pps_pipe),
                    intel_dig_port->base.base.base.id,
                    intel_dig_port->base.base.name);

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(intel_dp);
        intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

        /*
         * Even vdd force doesn't work until we've made
         * the power sequencer lock in on the port.
         */
        vlv_power_sequencer_kick(intel_dp);

        return intel_dp->pps_pipe;
}

static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        int backlight_controller = dev_priv->vbt.backlight.controller;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

        if (!intel_dp->pps_reset)
                return backlight_controller;

        intel_dp->pps_reset = false;

        /*
         * Only the HW needs to be reprogrammed, the SW state is fixed and
         * has been set up during connector init.
         */
        intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

        return backlight_controller;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
                               enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
                               enum pipe pipe)
{
        return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
                                enum pipe pipe)
{
        return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
                         enum pipe pipe)
{
        return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
                     enum port port,
                     vlv_pipe_check pipe_check)
{
        enum pipe pipe;

        for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
                u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
                        PANEL_PORT_SELECT_MASK;

                if (port_sel != PANEL_PORT_SELECT_VLV(port))
                        continue;

                if (!pipe_check(dev_priv, pipe))
                        continue;

                return pipe;
        }

        return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->base.port;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* try to find a pipe with this port selected */
        /* first pick one where the panel is on */
        intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                  vlv_pipe_has_pp_on);
        /* didn't find one? pick one where vdd is on */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_has_vdd_on);
        /* didn't find one? pick one with just the correct port */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_any);

        /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
        if (intel_dp->pps_pipe == INVALID_PIPE) {
                drm_dbg_kms(&dev_priv->drm,
                            "no initial power sequencer for [ENCODER:%d:%s]\n",
                            intel_dig_port->base.base.base.id,
                            intel_dig_port->base.base.name);
                return;
        }

        drm_dbg_kms(&dev_priv->drm,
                    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
                    intel_dig_port->base.base.base.id,
                    intel_dig_port->base.base.name,
                    pipe_name(intel_dp->pps_pipe));

        intel_dp_init_panel_power_sequencer(intel_dp);
        intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}

void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;

        if (drm_WARN_ON(&dev_priv->drm,
                        !(IS_VALLEYVIEW(dev_priv) ||
                          IS_CHERRYVIEW(dev_priv) ||
                          IS_GEN9_LP(dev_priv))))
                return;

        /*
         * We can't grab pps_mutex here due to deadlock with power_domain
         * mutex when power_domain functions are called while holding pps_mutex.
         * That also means that in order to use pps_pipe the code needs to
         * hold both a power domain reference and pps_mutex, and the power domain
         * reference get/put must be done while _not_ holding pps_mutex.
         * pps_{lock,unlock}() do these steps in the correct order, so they
         * should always be used.
         */

        for_each_intel_dp(&dev_priv->drm, encoder) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

                drm_WARN_ON(&dev_priv->drm,
                            intel_dp->active_pipe != INVALID_PIPE);

                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                if (IS_GEN9_LP(dev_priv))
                        intel_dp->pps_reset = true;
                else
                        intel_dp->pps_pipe = INVALID_PIPE;
        }
}

struct pps_registers {
        i915_reg_t pp_ctrl;
        i915_reg_t pp_stat;
        i915_reg_t pp_on;
        i915_reg_t pp_off;
        i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct intel_dp *intel_dp,
                                    struct pps_registers *regs)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        int pps_idx = 0;

        memset(regs, 0, sizeof(*regs));

        if (IS_GEN9_LP(dev_priv))
                pps_idx = bxt_power_sequencer_idx(intel_dp);
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                pps_idx = vlv_power_sequencer_pipe(intel_dp);

        regs->pp_ctrl = PP_CONTROL(pps_idx);
        regs->pp_stat = PP_STATUS(pps_idx);
        regs->pp_on = PP_ON_DELAYS(pps_idx);
        regs->pp_off = PP_OFF_DELAYS(pps_idx);

        /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
        if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
                regs->pp_div = INVALID_MMIO_REG;
        else
                regs->pp_div = PP_DIVISOR(pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
        struct pps_registers regs;

        intel_pps_get_registers(intel_dp, &regs);

        return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
        struct pps_registers regs;

        intel_pps_get_registers(intel_dp, &regs);

        return regs.pp_stat;
}

/*
 * Reboot notifier handler to shut down panel power to guarantee T12 timing.
 * This function is only applicable when the panel PM state is not to be
 * tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
                              void *unused)
{
        struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
                                                 edp_notifier);
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        intel_wakeref_t wakeref;

        if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
                return 0;

        with_pps_lock(intel_dp, wakeref) {
                if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                        enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
                        i915_reg_t pp_ctrl_reg, pp_div_reg;
                        u32 pp_div;

                        pp_ctrl_reg = PP_CONTROL(pipe);
                        pp_div_reg  = PP_DIVISOR(pipe);
                        pp_div = intel_de_read(dev_priv, pp_div_reg);
                        pp_div &= PP_REFERENCE_DIVIDER_MASK;

                        /* 0x1F write to PP_DIV_REG sets max cycle delay */
                        intel_de_write(dev_priv, pp_div_reg, pp_div | 0x1F);
                        intel_de_write(dev_priv, pp_ctrl_reg,
                                       PANEL_UNLOCK_REGS);
                        msleep(intel_dp->panel_power_cycle_delay);
                }
        }

        return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        lockdep_assert_held(&dev_priv->pps_mutex);

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            intel_dp->pps_pipe == INVALID_PIPE)
                return false;

        return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        lockdep_assert_held(&dev_priv->pps_mutex);

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            intel_dp->pps_pipe == INVALID_PIPE)
                return false;

        return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        if (!intel_dp_is_edp(intel_dp))
                return;

        if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
                drm_WARN(&dev_priv->drm, 1,
                         "eDP powered off while attempting aux channel communication.\n");
                drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
                            intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
                            intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
        }
}

static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
        const unsigned int timeout_ms = 10;
        u32 status;
        bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        done = wait_event_timeout(i915->gmbus_wait_queue, C,
                                  msecs_to_jiffies_timeout(timeout_ms));

        /* just trace the final value */
        trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

        if (!done)
                drm_err(&i915->drm,
                        "%s: did not complete or timeout within %ums (status 0x%08x)\n",
                        intel_dp->aux.name, timeout_ms, status);
#undef C

        return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        if (index)
                return 0;

        /*
         * The clock divider is based on the hrawclk, and we aim to run the
         * AUX clock at 2MHz. So, take the hrawclk value and divide by 2000.
         */
        return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
}
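
/*
 * Illustration: with a 200 MHz hrawclk (rawclk_freq = 200000 kHz) the
 * divider is DIV_ROUND_CLOSEST(200000, 2000) = 100, giving the desired
 * ~2 MHz AUX bit clock.
 */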

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        u32 freq;

        if (index)
                return 0;

        /*
         * The clock divider is based on the cdclk or PCH rawclk, and we aim
         * to run the AUX clock at 2MHz. So, take the cdclk or PCH rawclk
         * value and divide by 2000.
         */
        if (dig_port->aux_ch == AUX_CH_A)
                freq = dev_priv->cdclk.hw.cdclk;
        else
                freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
        return DIV_ROUND_CLOSEST(freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

        if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
                /* Workaround for non-ULT HSW */
                switch (index) {
                case 0: return 63;
                case 1: return 72;
                default: return 0;
                }
        }

        return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
        /*
         * SKL doesn't need us to program the AUX clock divider (Hardware will
         * derive the clock from CDCLK automatically). We still implement the
         * get_aux_clock_divider vfunc to plug into the existing code.
         */
        return index ? 0 : 1;
}

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
                                int send_bytes,
                                u32 aux_clock_divider)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv =
                        to_i915(intel_dig_port->base.base.dev);
        u32 precharge, timeout;

        if (IS_GEN(dev_priv, 6))
                precharge = 3;
        else
                precharge = 5;

        if (IS_BROADWELL(dev_priv))
                timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
        else
                timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

        return DP_AUX_CH_CTL_SEND_BUSY |
               DP_AUX_CH_CTL_DONE |
               DP_AUX_CH_CTL_INTERRUPT |
               DP_AUX_CH_CTL_TIME_OUT_ERROR |
               timeout |
               DP_AUX_CH_CTL_RECEIVE_ERROR |
               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
               (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
               (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
                                int send_bytes,
                                u32 unused)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *i915 =
                        to_i915(intel_dig_port->base.base.dev);
        enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
        u32 ret;

        ret = DP_AUX_CH_CTL_SEND_BUSY |
              DP_AUX_CH_CTL_DONE |
              DP_AUX_CH_CTL_INTERRUPT |
              DP_AUX_CH_CTL_TIME_OUT_ERROR |
              DP_AUX_CH_CTL_TIME_OUT_MAX |
              DP_AUX_CH_CTL_RECEIVE_ERROR |
              (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
              DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
              DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);

        if (intel_phy_is_tc(i915, phy) &&
            intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
                ret |= DP_AUX_CH_CTL_TBT_IO;

        return ret;
}
1332
1333 static int
1334 intel_dp_aux_xfer(struct intel_dp *intel_dp,
1335                   const u8 *send, int send_bytes,
1336                   u8 *recv, int recv_size,
1337                   u32 aux_send_ctl_flags)
1338 {
1339         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1340         struct drm_i915_private *i915 =
1341                         to_i915(intel_dig_port->base.base.dev);
1342         struct intel_uncore *uncore = &i915->uncore;
1343         enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
1344         bool is_tc_port = intel_phy_is_tc(i915, phy);
1345         i915_reg_t ch_ctl, ch_data[5];
1346         u32 aux_clock_divider;
1347         enum intel_display_power_domain aux_domain =
1348                 intel_aux_power_domain(intel_dig_port);
1349         intel_wakeref_t aux_wakeref;
1350         intel_wakeref_t pps_wakeref;
1351         int i, ret, recv_bytes;
1352         int try, clock = 0;
1353         u32 status;
1354         bool vdd;
1355
1356         ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
1357         for (i = 0; i < ARRAY_SIZE(ch_data); i++)
1358                 ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
1359
1360         if (is_tc_port)
1361                 intel_tc_port_lock(intel_dig_port);
1362
1363         aux_wakeref = intel_display_power_get(i915, aux_domain);
1364         pps_wakeref = pps_lock(intel_dp);
1365
1366         /*
1367          * We will be called with VDD already enabled for dpcd/edid/oui reads.
1368          * In such cases we want to leave VDD enabled and it's up to upper layers
1369          * to turn it off. But for eg. i2c-dev access we need to turn it on/off
1370          * ourselves.
1371          */
1372         vdd = edp_panel_vdd_on(intel_dp);
1373
1374         /* dp aux is extremely sensitive to irq latency, hence request the
1375          * lowest possible wakeup latency and so prevent the cpu from going into
1376          * deep sleep states.
1377          */
1378         pm_qos_update_request(&i915->pm_qos, 0);
1379
1380         intel_dp_check_edp(intel_dp);
1381
1382         /* Try to wait for any previous AUX channel activity */
1383         for (try = 0; try < 3; try++) {
1384                 status = intel_uncore_read_notrace(uncore, ch_ctl);
1385                 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
1386                         break;
1387                 msleep(1);
1388         }
1389         /* just trace the final value */
1390         trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
1391
1392         if (try == 3) {
1393                 const u32 status = intel_uncore_read(uncore, ch_ctl);
1394
1395                 if (status != intel_dp->aux_busy_last_status) {
1396                         drm_WARN(&i915->drm, 1,
1397                                  "%s: not started (status 0x%08x)\n",
1398                                  intel_dp->aux.name, status);
1399                         intel_dp->aux_busy_last_status = status;
1400                 }
1401
1402                 ret = -EBUSY;
1403                 goto out;
1404         }
1405
1406         /* Only 5 data registers! */
1407         if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
1408                 ret = -E2BIG;
1409                 goto out;
1410         }
1411
1412         while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
1413                 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
1414                                                           send_bytes,
1415                                                           aux_clock_divider);
1416
1417                 send_ctl |= aux_send_ctl_flags;
1418
1419                         /* DP spec requires at least 3 retries; we try up to 5 times */
1420                 for (try = 0; try < 5; try++) {
1421                         /* Load the send data into the aux channel data registers */
1422                         for (i = 0; i < send_bytes; i += 4)
1423                                 intel_uncore_write(uncore,
1424                                                    ch_data[i >> 2],
1425                                                    intel_dp_pack_aux(send + i,
1426                                                                      send_bytes - i));
1427
1428                         /* Send the command and wait for it to complete */
1429                         intel_uncore_write(uncore, ch_ctl, send_ctl);
1430
1431                         status = intel_dp_aux_wait_done(intel_dp);
1432
1433                         /* Clear done status and any errors */
1434                         intel_uncore_write(uncore,
1435                                            ch_ctl,
1436                                            status |
1437                                            DP_AUX_CH_CTL_DONE |
1438                                            DP_AUX_CH_CTL_TIME_OUT_ERROR |
1439                                            DP_AUX_CH_CTL_RECEIVE_ERROR);
1440
1441                         /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2:
1442                          *   a 400us delay is required for errors and timeouts.
1443                          *   Timeout errors from the HW already meet this
1444                          *   requirement, so skip to the next iteration.
1445                          */
1446                         if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
1447                                 continue;
1448
1449                         if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1450                                 usleep_range(400, 500);
1451                                 continue;
1452                         }
1453                         if (status & DP_AUX_CH_CTL_DONE)
1454                                 goto done;
1455                 }
1456         }
1457
1458         if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1459                 drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
1460                         intel_dp->aux.name, status);
1461                 ret = -EBUSY;
1462                 goto out;
1463         }
1464
1465 done:
1466         /* Check for timeout or receive error.
1467          * Timeouts occur when the sink is not connected.
1468          */
1469         if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1470                 drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
1471                         intel_dp->aux.name, status);
1472                 ret = -EIO;
1473                 goto out;
1474         }
1475
1476         /* Timeouts occur when the device isn't connected, so they're
1477          * "normal" -- don't fill the kernel log with these */
1478         if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
1479                 drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
1480                             intel_dp->aux.name, status);
1481                 ret = -ETIMEDOUT;
1482                 goto out;
1483         }
1484
1485         /* Unload any bytes sent back from the other side */
1486         recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
1487                       DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
1488
1489         /*
1490          * By BSpec: "Message sizes of 0 or >20 are not allowed."
1491          * We have no idea what happened, so we return -EBUSY and let the
1492          * drm layer take care of the necessary retries.
1493          */
1494         if (recv_bytes == 0 || recv_bytes > 20) {
1495                 drm_dbg_kms(&i915->drm,
1496                             "%s: Forbidden recv_bytes = %d on aux transaction\n",
1497                             intel_dp->aux.name, recv_bytes);
1498                 ret = -EBUSY;
1499                 goto out;
1500         }
1501
1502         if (recv_bytes > recv_size)
1503                 recv_bytes = recv_size;
1504
1505         for (i = 0; i < recv_bytes; i += 4)
1506                 intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
1507                                     recv + i, recv_bytes - i);
1508
1509         ret = recv_bytes;
1510 out:
1511         pm_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
1512
1513         if (vdd)
1514                 edp_panel_vdd_off(intel_dp, false);
1515
1516         pps_unlock(intel_dp, pps_wakeref);
1517         intel_display_power_put_async(i915, aux_domain, aux_wakeref);
1518
1519         if (is_tc_port)
1520                 intel_tc_port_unlock(intel_dig_port);
1521
1522         return ret;
1523 }
1524
1525 #define BARE_ADDRESS_SIZE       3
1526 #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
1527
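/*
 * Build the 4-byte AUX request header as laid out in the DP spec:
 *
 *   txbuf[0]: request type in the high nibble, address bits 19:16 below
 *   txbuf[1]: address bits 15:8
 *   txbuf[2]: address bits 7:0
 *   txbuf[3]: LEN field, encoded as the transfer size minus one
 *
 * Address-only ("bare") transactions send just the first three bytes,
 * hence BARE_ADDRESS_SIZE above.
 */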
1528 static void
1529 intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
1530                     const struct drm_dp_aux_msg *msg)
1531 {
1532         txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
1533         txbuf[1] = (msg->address >> 8) & 0xff;
1534         txbuf[2] = msg->address & 0xff;
1535         txbuf[3] = msg->size - 1;
1536 }
1537
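/*
 * drm_dp_aux ->transfer() hook. Packs the header (and any write payload)
 * into txbuf, runs the raw transfer, and decodes the reply: the first rx
 * byte carries the 4-bit reply code, a second byte on a short write
 * carries the number of bytes actually written, and for reads the
 * payload follows the reply byte.
 */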
1538 static ssize_t
1539 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
1540 {
1541         struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
1542         u8 txbuf[20], rxbuf[20];
1543         size_t txsize, rxsize;
1544         int ret;
1545
1546         intel_dp_aux_header(txbuf, msg);
1547
1548         switch (msg->request & ~DP_AUX_I2C_MOT) {
1549         case DP_AUX_NATIVE_WRITE:
1550         case DP_AUX_I2C_WRITE:
1551         case DP_AUX_I2C_WRITE_STATUS_UPDATE:
1552                 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
1553                 rxsize = 2; /* 0 or 1 data bytes */
1554
1555                 if (WARN_ON(txsize > 20))
1556                         return -E2BIG;
1557
1558                 WARN_ON(!msg->buffer != !msg->size);
1559
1560                 if (msg->buffer)
1561                         memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
1562
1563                 ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
1564                                         rxbuf, rxsize, 0);
1565                 if (ret > 0) {
1566                         msg->reply = rxbuf[0] >> 4;
1567
1568                         if (ret > 1) {
1569                                 /* Number of bytes written in a short write. */
1570                                 ret = clamp_t(int, rxbuf[1], 0, msg->size);
1571                         } else {
1572                                 /* Return payload size. */
1573                                 ret = msg->size;
1574                         }
1575                 }
1576                 break;
1577
1578         case DP_AUX_NATIVE_READ:
1579         case DP_AUX_I2C_READ:
1580                 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
1581                 rxsize = msg->size + 1;
1582
1583                 if (WARN_ON(rxsize > 20))
1584                         return -E2BIG;
1585
1586                 ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
1587                                         rxbuf, rxsize, 0);
1588                 if (ret > 0) {
1589                         msg->reply = rxbuf[0] >> 4;
1590                         /*
1591                          * Assume happy day, and copy the data. The caller is
1592                          * expected to check msg->reply before touching it.
1593                          *
1594                          * Return payload size.
1595                          */
1596                         ret--;
1597                         memcpy(msg->buffer, rxbuf + 1, ret);
1598                 }
1599                 break;
1600
1601         default:
1602                 ret = -EINVAL;
1603                 break;
1604         }
1605
1606         return ret;
1607 }
1608
1609
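/*
 * Platform specific AUX CH register lookup. Each variant maps the port's
 * aux_ch to the control/data registers of that hardware generation,
 * falling back to a safe channel (with a MISSING_CASE warning) if we're
 * handed an AUX CH the platform doesn't have.
 */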
1610 static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
1611 {
1612         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1613         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1614         enum aux_ch aux_ch = dig_port->aux_ch;
1615
1616         switch (aux_ch) {
1617         case AUX_CH_B:
1618         case AUX_CH_C:
1619         case AUX_CH_D:
1620                 return DP_AUX_CH_CTL(aux_ch);
1621         default:
1622                 MISSING_CASE(aux_ch);
1623                 return DP_AUX_CH_CTL(AUX_CH_B);
1624         }
1625 }
1626
1627 static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
1628 {
1629         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1630         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1631         enum aux_ch aux_ch = dig_port->aux_ch;
1632
1633         switch (aux_ch) {
1634         case AUX_CH_B:
1635         case AUX_CH_C:
1636         case AUX_CH_D:
1637                 return DP_AUX_CH_DATA(aux_ch, index);
1638         default:
1639                 MISSING_CASE(aux_ch);
1640                 return DP_AUX_CH_DATA(AUX_CH_B, index);
1641         }
1642 }
1643
1644 static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
1645 {
1646         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1647         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1648         enum aux_ch aux_ch = dig_port->aux_ch;
1649
1650         switch (aux_ch) {
1651         case AUX_CH_A:
1652                 return DP_AUX_CH_CTL(aux_ch);
1653         case AUX_CH_B:
1654         case AUX_CH_C:
1655         case AUX_CH_D:
1656                 return PCH_DP_AUX_CH_CTL(aux_ch);
1657         default:
1658                 MISSING_CASE(aux_ch);
1659                 return DP_AUX_CH_CTL(AUX_CH_A);
1660         }
1661 }
1662
1663 static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
1664 {
1665         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1666         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1667         enum aux_ch aux_ch = dig_port->aux_ch;
1668
1669         switch (aux_ch) {
1670         case AUX_CH_A:
1671                 return DP_AUX_CH_DATA(aux_ch, index);
1672         case AUX_CH_B:
1673         case AUX_CH_C:
1674         case AUX_CH_D:
1675                 return PCH_DP_AUX_CH_DATA(aux_ch, index);
1676         default:
1677                 MISSING_CASE(aux_ch);
1678                 return DP_AUX_CH_DATA(AUX_CH_A, index);
1679         }
1680 }
1681
1682 static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
1683 {
1684         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1685         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1686         enum aux_ch aux_ch = dig_port->aux_ch;
1687
1688         switch (aux_ch) {
1689         case AUX_CH_A:
1690         case AUX_CH_B:
1691         case AUX_CH_C:
1692         case AUX_CH_D:
1693         case AUX_CH_E:
1694         case AUX_CH_F:
1695         case AUX_CH_G:
1696                 return DP_AUX_CH_CTL(aux_ch);
1697         default:
1698                 MISSING_CASE(aux_ch);
1699                 return DP_AUX_CH_CTL(AUX_CH_A);
1700         }
1701 }
1702
1703 static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
1704 {
1705         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1706         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1707         enum aux_ch aux_ch = dig_port->aux_ch;
1708
1709         switch (aux_ch) {
1710         case AUX_CH_A:
1711         case AUX_CH_B:
1712         case AUX_CH_C:
1713         case AUX_CH_D:
1714         case AUX_CH_E:
1715         case AUX_CH_F:
1716         case AUX_CH_G:
1717                 return DP_AUX_CH_DATA(aux_ch, index);
1718         default:
1719                 MISSING_CASE(aux_ch);
1720                 return DP_AUX_CH_DATA(AUX_CH_A, index);
1721         }
1722 }
1723
1724 static void
1725 intel_dp_aux_fini(struct intel_dp *intel_dp)
1726 {
1727         kfree(intel_dp->aux.name);
1728 }
1729
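/*
 * Pick the AUX register accessors and the clock divider / send-ctl hooks
 * for this platform: SKL-style for gen9+, ILK-style for PCH split
 * platforms, G4X-style otherwise, with HSW/BDW using their own clock
 * divider.
 */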
1730 static void
1731 intel_dp_aux_init(struct intel_dp *intel_dp)
1732 {
1733         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1734         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1735         struct intel_encoder *encoder = &dig_port->base;
1736
1737         if (INTEL_GEN(dev_priv) >= 9) {
1738                 intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
1739                 intel_dp->aux_ch_data_reg = skl_aux_data_reg;
1740         } else if (HAS_PCH_SPLIT(dev_priv)) {
1741                 intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
1742                 intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
1743         } else {
1744                 intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
1745                 intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
1746         }
1747
1748         if (INTEL_GEN(dev_priv) >= 9)
1749                 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
1750         else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
1751                 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
1752         else if (HAS_PCH_SPLIT(dev_priv))
1753                 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
1754         else
1755                 intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
1756
1757         if (INTEL_GEN(dev_priv) >= 9)
1758                 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
1759         else
1760                 intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
1761
1762         drm_dp_aux_init(&intel_dp->aux);
1763
1764         /* Failure to allocate our preferred name is not critical */
1765         intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/port %c",
1766                                        aux_ch_name(dig_port->aux_ch),
1767                                        port_name(encoder->port));
1768         intel_dp->aux.transfer = intel_dp_aux_transfer;
1769 }
1770
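/*
 * source_rates[] is sorted in ascending order, so the last entry is the
 * maximum supported link rate. 540000 corresponds to HBR2 (5.4 Gbit/s
 * per lane), 810000 to HBR3 (8.1 Gbit/s per lane).
 */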
1771 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1772 {
1773         int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1774
1775         return max_rate >= 540000;
1776 }
1777
1778 bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
1779 {
1780         int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1781
1782         return max_rate >= 810000;
1783 }
1784
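/*
 * Pre-DDI platforms use fixed DPLL divisor tables: look up the divisors
 * matching the selected port clock (e.g. 162000 or 270000) and bake them
 * into the crtc state.
 */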
1785 static void
1786 intel_dp_set_clock(struct intel_encoder *encoder,
1787                    struct intel_crtc_state *pipe_config)
1788 {
1789         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1790         const struct dp_link_dpll *divisor = NULL;
1791         int i, count = 0;
1792
1793         if (IS_G4X(dev_priv)) {
1794                 divisor = g4x_dpll;
1795                 count = ARRAY_SIZE(g4x_dpll);
1796         } else if (HAS_PCH_SPLIT(dev_priv)) {
1797                 divisor = pch_dpll;
1798                 count = ARRAY_SIZE(pch_dpll);
1799         } else if (IS_CHERRYVIEW(dev_priv)) {
1800                 divisor = chv_dpll;
1801                 count = ARRAY_SIZE(chv_dpll);
1802         } else if (IS_VALLEYVIEW(dev_priv)) {
1803                 divisor = vlv_dpll;
1804                 count = ARRAY_SIZE(vlv_dpll);
1805         }
1806
1807         if (divisor && count) {
1808                 for (i = 0; i < count; i++) {
1809                         if (pipe_config->port_clock == divisor[i].clock) {
1810                                 pipe_config->dpll = divisor[i].dpll;
1811                                 pipe_config->clock_set = true;
1812                                 break;
1813                         }
1814                 }
1815         }
1816 }
1817
1818 static void snprintf_int_array(char *str, size_t len,
1819                                const int *array, int nelem)
1820 {
1821         int i;
1822
1823         str[0] = '\0';
1824
1825         for (i = 0; i < nelem; i++) {
1826                 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1827                 if (r >= len)
1828                         return;
1829                 str += r;
1830                 len -= r;
1831         }
1832 }
1833
1834 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1835 {
1836         char str[128]; /* FIXME: too big for stack? */
1837
1838         if (!drm_debug_enabled(DRM_UT_KMS))
1839                 return;
1840
1841         snprintf_int_array(str, sizeof(str),
1842                            intel_dp->source_rates, intel_dp->num_source_rates);
1843         DRM_DEBUG_KMS("source rates: %s\n", str);
1844
1845         snprintf_int_array(str, sizeof(str),
1846                            intel_dp->sink_rates, intel_dp->num_sink_rates);
1847         DRM_DEBUG_KMS("sink rates: %s\n", str);
1848
1849         snprintf_int_array(str, sizeof(str),
1850                            intel_dp->common_rates, intel_dp->num_common_rates);
1851         DRM_DEBUG_KMS("common rates: %s\n", str);
1852 }
1853
1854 int
1855 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1856 {
1857         int len;
1858
1859         len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
1860         if (WARN_ON(len <= 0))
1861                 return 162000;
1862
1863         return intel_dp->common_rates[len - 1];
1864 }
1865
1866 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1867 {
1868         int i = intel_dp_rate_index(intel_dp->sink_rates,
1869                                     intel_dp->num_sink_rates, rate);
1870
1871         if (WARN_ON(i < 0))
1872                 i = 0;
1873
1874         return i;
1875 }
1876
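/*
 * eDP 1.4+ sinks can expose their supported link rates as a table in
 * DPCD (DP_SUPPORTED_LINK_RATES); for those we program an index into the
 * table via DP_LINK_RATE_SET and leave the link BW code at 0. Everything
 * else uses the classic DP_LINK_BW_SET bandwidth code.
 */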
1877 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1878                            u8 *link_bw, u8 *rate_select)
1879 {
1880         /* eDP 1.4 rate select method. */
1881         if (intel_dp->use_rate_select) {
1882                 *link_bw = 0;
1883                 *rate_select =
1884                         intel_dp_rate_select(intel_dp, port_clock);
1885         } else {
1886                 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1887                 *rate_select = 0;
1888         }
1889 }
1890
1891 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
1892                                          const struct intel_crtc_state *pipe_config)
1893 {
1894         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1895
1896         /* On TGL, FEC is supported on all Pipes */
1897         if (INTEL_GEN(dev_priv) >= 12)
1898                 return true;
1899
1900         if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
1901                 return true;
1902
1903         return false;
1904 }
1905
1906 static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
1907                                   const struct intel_crtc_state *pipe_config)
1908 {
1909         return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
1910                 drm_dp_sink_supports_fec(intel_dp->fec_capable);
1911 }
1912
1913 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
1914                                   const struct intel_crtc_state *crtc_state)
1915 {
1916         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
1917
1918         if (!intel_dp_is_edp(intel_dp) && !crtc_state->fec_enable)
1919                 return false;
1920
1921         return intel_dsc_source_support(encoder, crtc_state) &&
1922                 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
1923 }
1924
1925 static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
1926                                 struct intel_crtc_state *pipe_config)
1927 {
1928         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1929         struct intel_connector *intel_connector = intel_dp->attached_connector;
1930         int bpp, bpc;
1931
1932         bpp = pipe_config->pipe_bpp;
1933         bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
1934
1935         if (bpc > 0)
1936                 bpp = min(bpp, 3*bpc);
1937
1938         if (intel_dp_is_edp(intel_dp)) {
1939                 /* Get bpp from VBT only for panels that don't have bpp in EDID */
1940                 if (intel_connector->base.display_info.bpc == 0 &&
1941                     dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
1942                         drm_dbg_kms(&dev_priv->drm,
1943                                     "clamping bpp for eDP panel to BIOS-provided %i\n",
1944                                     dev_priv->vbt.edp.bpp);
1945                         bpp = dev_priv->vbt.edp.bpp;
1946                 }
1947         }
1948
1949         return bpp;
1950 }
1951
1952 /* Adjust link config limits based on compliance test requests. */
1953 void
1954 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
1955                                   struct intel_crtc_state *pipe_config,
1956                                   struct link_config_limits *limits)
1957 {
1958         /* For DP Compliance we override the computed bpp for the pipe */
1959         if (intel_dp->compliance.test_data.bpc != 0) {
1960                 int bpp = 3 * intel_dp->compliance.test_data.bpc;
1961
1962                 limits->min_bpp = limits->max_bpp = bpp;
1963                 pipe_config->dither_force_disable = bpp == 6 * 3;
1964
1965                 DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
1966         }
1967
1968         /* Use values requested by Compliance Test Request */
1969         if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
1970                 int index;
1971
1972                 /* Validate the compliance test data since max values
1973                  * might have changed due to link train fallback.
1974                  */
1975                 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
1976                                                intel_dp->compliance.test_lane_count)) {
1977                         index = intel_dp_rate_index(intel_dp->common_rates,
1978                                                     intel_dp->num_common_rates,
1979                                                     intel_dp->compliance.test_link_rate);
1980                         if (index >= 0)
1981                                 limits->min_clock = limits->max_clock = index;
1982                         limits->min_lane_count = limits->max_lane_count =
1983                                 intel_dp->compliance.test_lane_count;
1984                 }
1985         }
1986 }
1987
1988 static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp)
1989 {
1990         /*
1991          * The bpp value was assumed to be for RGB format. For YCbCr 4:2:0
1992          * output the number of bytes per pixel is half the number of bytes
1993          * of an RGB pixel.
1994          */
1995         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
1996                 bpp /= 2;
1997
1998         return bpp;
1999 }
2000
2001 /* Optimize link config in order: max bpp, min clock, min lanes */
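/*
 * For a rough feel for the numbers: intel_dp_link_required() is the
 * mode's data rate, roughly crtc_clock (kHz) * bpp / 8, while
 * intel_dp_max_data_rate() is the link's payload capacity,
 * port_clock (kHz) * lanes * 8/10 after 8b/10b channel coding. E.g.
 * 4k60 (~533 MHz) at 24 bpp needs ~1600000, and HBR2 x4 provides
 * 540000 * 4 * 8/10 = 1728000, so it fits.
 */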
2002 static int
2003 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
2004                                   struct intel_crtc_state *pipe_config,
2005                                   const struct link_config_limits *limits)
2006 {
2007         struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
2008         int bpp, clock, lane_count;
2009         int mode_rate, link_clock, link_avail;
2010
2011         for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
2012                 int output_bpp = intel_dp_output_bpp(pipe_config, bpp);
2013
2014                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
2015                                                    output_bpp);
2016
2017                 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
2018                         for (lane_count = limits->min_lane_count;
2019                              lane_count <= limits->max_lane_count;
2020                              lane_count <<= 1) {
2021                                 link_clock = intel_dp->common_rates[clock];
2022                                 link_avail = intel_dp_max_data_rate(link_clock,
2023                                                                     lane_count);
2024
2025                                 if (mode_rate <= link_avail) {
2026                                         pipe_config->lane_count = lane_count;
2027                                         pipe_config->pipe_bpp = bpp;
2028                                         pipe_config->port_clock = link_clock;
2029
2030                                         return 0;
2031                                 }
2032                         }
2033                 }
2034         }
2035
2036         return -EINVAL;
2037 }
2038
2039 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
2040 {
2041         int i, num_bpc;
2042         u8 dsc_bpc[3] = {0};
2043
2044         num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
2045                                                        dsc_bpc);
2046         for (i = 0; i < num_bpc; i++) {
2047                 if (dsc_max_bpc >= dsc_bpc[i])
2048                         return dsc_bpc[i] * 3;
2049         }
2050
2051         return 0;
2052 }
2053
2054 #define DSC_SUPPORTED_VERSION_MIN               1
2055
2056 static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
2057                                        struct intel_crtc_state *crtc_state)
2058 {
2059         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2060         struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
2061         u8 line_buf_depth;
2062         int ret;
2063
2064         ret = intel_dsc_compute_params(encoder, crtc_state);
2065         if (ret)
2066                 return ret;
2067
2068         /*
2069          * Slice Height of 8 works for all currently available panels. So start
2070          * with that if pic_height is an integral multiple of 8. Eventually add
2071          * logic to try multiple slice heights.
2072          */
2073         if (vdsc_cfg->pic_height % 8 == 0)
2074                 vdsc_cfg->slice_height = 8;
2075         else if (vdsc_cfg->pic_height % 4 == 0)
2076                 vdsc_cfg->slice_height = 4;
2077         else
2078                 vdsc_cfg->slice_height = 2;
2079
2080         vdsc_cfg->dsc_version_major =
2081                 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
2082                  DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
2083         vdsc_cfg->dsc_version_minor =
2084                 min(DSC_SUPPORTED_VERSION_MIN,
2085                     (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
2086                      DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);
2087
2088         vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
2089                 DP_DSC_RGB;
2090
2091         line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
2092         if (!line_buf_depth) {
2093                 DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n");
2094                 return -EINVAL;
2095         }
2096
2097         if (vdsc_cfg->dsc_version_minor == 2)
2098                 vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
2099                         DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
2100         else
2101                 vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
2102                         DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
2103
2104         vdsc_cfg->block_pred_enable =
2105                 intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
2106                 DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
2107
2108         return drm_dsc_compute_rc_parameters(vdsc_cfg);
2109 }
2110
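/*
 * Compute the DSC configuration: pick the max input bpc the platform
 * allows (12 on TGL+, 10 on ICL), clamp it against what the sink can
 * decompress, and run at max link rate / lane count for now. The
 * compressed bpp and slice count are derived from the sink's DSC caps
 * and the available link bandwidth, and the stream is split across two
 * VDSC engines when a single one can't keep up with the pixel rate.
 */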
2111 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
2112                                        struct intel_crtc_state *pipe_config,
2113                                        struct drm_connector_state *conn_state,
2114                                        struct link_config_limits *limits)
2115 {
2116         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2117         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2118         struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
2119         u8 dsc_max_bpc;
2120         int pipe_bpp;
2121         int ret;
2122
2123         pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
2124                 intel_dp_supports_fec(intel_dp, pipe_config);
2125
2126         if (!intel_dp_supports_dsc(intel_dp, pipe_config))
2127                 return -EINVAL;
2128
2129         /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
2130         if (INTEL_GEN(dev_priv) >= 12)
2131                 dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
2132         else
2133                 dsc_max_bpc = min_t(u8, 10,
2134                                     conn_state->max_requested_bpc);
2135
2136         pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
2137
2138         /* Min Input BPC for ICL+ is 8 */
2139         if (pipe_bpp < 8 * 3) {
2140                 drm_dbg_kms(&dev_priv->drm,
2141                             "No DSC support for less than 8bpc\n");
2142                 return -EINVAL;
2143         }
2144
2145         /*
2146          * For now enable DSC for max bpp, max link rate, max lane count.
2147          * Optimize this later for the minimum possible link rate/lane count
2148          * with DSC enabled for the requested mode.
2149          */
2150         pipe_config->pipe_bpp = pipe_bpp;
2151         pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
2152         pipe_config->lane_count = limits->max_lane_count;
2153
2154         if (intel_dp_is_edp(intel_dp)) {
2155                 pipe_config->dsc.compressed_bpp =
2156                         min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
2157                               pipe_config->pipe_bpp);
2158                 pipe_config->dsc.slice_count =
2159                         drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
2160                                                         true);
2161         } else {
2162                 u16 dsc_max_output_bpp;
2163                 u8 dsc_dp_slice_count;
2164
2165                 dsc_max_output_bpp =
2166                         intel_dp_dsc_get_output_bpp(dev_priv,
2167                                                     pipe_config->port_clock,
2168                                                     pipe_config->lane_count,
2169                                                     adjusted_mode->crtc_clock,
2170                                                     adjusted_mode->crtc_hdisplay);
2171                 dsc_dp_slice_count =
2172                         intel_dp_dsc_get_slice_count(intel_dp,
2173                                                      adjusted_mode->crtc_clock,
2174                                                      adjusted_mode->crtc_hdisplay);
2175                 if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
2176                         drm_dbg_kms(&dev_priv->drm,
2177                                     "Compressed BPP/Slice Count not supported\n");
2178                         return -EINVAL;
2179                 }
2180                 pipe_config->dsc.compressed_bpp = min_t(u16,
2181                                                         dsc_max_output_bpp >> 4,
2182                                                         pipe_config->pipe_bpp);
2183                 pipe_config->dsc.slice_count = dsc_dp_slice_count;
2184         }
2185         /*
2186          * The VDSC engine operates at 1 pixel per clock, so if the peak pixel
2187          * rate is greater than the maximum cdclk and the slice count is
2188          * greater than 1, then we need to use 2 VDSC instances.
2189          */
2190         if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
2191                 if (pipe_config->dsc.slice_count > 1) {
2192                         pipe_config->dsc.dsc_split = true;
2193                 } else {
2194                         drm_dbg_kms(&dev_priv->drm,
2195                                     "Cannot split stream to use 2 VDSC instances\n");
2196                         return -EINVAL;
2197                 }
2198         }
2199
2200         ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
2201         if (ret < 0) {
2202                 drm_dbg_kms(&dev_priv->drm,
2203                             "Cannot compute valid DSC parameters for Input Bpp = %d "
2204                             "Compressed BPP = %d\n",
2205                             pipe_config->pipe_bpp,
2206                             pipe_config->dsc.compressed_bpp);
2207                 return ret;
2208         }
2209
2210         pipe_config->dsc.compression_enable = true;
2211         drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
2212                     "Compressed Bpp = %d Slice Count = %d\n",
2213                     pipe_config->pipe_bpp,
2214                     pipe_config->dsc.compressed_bpp,
2215                     pipe_config->dsc.slice_count);
2216
2217         return 0;
2218 }
2219
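/* RGB output can go as low as 6 bpc (18 bpp); YCbCr needs at least 8 bpc. */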
2220 int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state)
2221 {
2222         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
2223                 return 6 * 3;
2224         else
2225                 return 8 * 3;
2226 }
2227
2228 static int
2229 intel_dp_compute_link_config(struct intel_encoder *encoder,
2230                              struct intel_crtc_state *pipe_config,
2231                              struct drm_connector_state *conn_state)
2232 {
2233         struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
2234         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2235         struct link_config_limits limits;
2236         int common_len;
2237         int ret;
2238
2239         common_len = intel_dp_common_len_rate_limit(intel_dp,
2240                                                     intel_dp->max_link_rate);
2241
2242         /* No common link rates between source and sink */
2243         drm_WARN_ON(encoder->base.dev, common_len <= 0);
2244
2245         limits.min_clock = 0;
2246         limits.max_clock = common_len - 1;
2247
2248         limits.min_lane_count = 1;
2249         limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
2250
2251         limits.min_bpp = intel_dp_min_bpp(pipe_config);
2252         limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
2253
2254         if (intel_dp_is_edp(intel_dp)) {
2255                 /*
2256                  * Use the maximum clock and number of lanes the eDP panel
2257                  * advertises being capable of. The panels are generally
2258                  * designed to support only a single clock and lane
2259                  * configuration, and typically these values correspond to the
2260                  * native resolution of the panel.
2261                  */
2262                 limits.min_lane_count = limits.max_lane_count;
2263                 limits.min_clock = limits.max_clock;
2264         }
2265
2266         intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
2267
2268         DRM_DEBUG_KMS("DP link computation with max lane count %i "
2269                       "max rate %d max bpp %d pixel clock %iKHz\n",
2270                       limits.max_lane_count,
2271                       intel_dp->common_rates[limits.max_clock],
2272                       limits.max_bpp, adjusted_mode->crtc_clock);
2273
2274         /*
2275          * Optimize for slow and wide. This is the place to add alternative
2276          * optimization policy.
2277          */
2278         ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
2279
2280         /* enable compression if the mode doesn't fit available BW */
2281         DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
2282         if (ret || intel_dp->force_dsc_en) {
2283                 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
2284                                                   conn_state, &limits);
2285                 if (ret < 0)
2286                         return ret;
2287         }
2288
2289         if (pipe_config->dsc.compression_enable) {
2290                 DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
2291                               pipe_config->lane_count, pipe_config->port_clock,
2292                               pipe_config->pipe_bpp,
2293                               pipe_config->dsc.compressed_bpp);
2294
2295                 DRM_DEBUG_KMS("DP link rate required %i available %i\n",
2296                               intel_dp_link_required(adjusted_mode->crtc_clock,
2297                                                      pipe_config->dsc.compressed_bpp),
2298                               intel_dp_max_data_rate(pipe_config->port_clock,
2299                                                      pipe_config->lane_count));
2300         } else {
2301                 DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
2302                               pipe_config->lane_count, pipe_config->port_clock,
2303                               pipe_config->pipe_bpp);
2304
2305                 DRM_DEBUG_KMS("DP link rate required %i available %i\n",
2306                               intel_dp_link_required(adjusted_mode->crtc_clock,
2307                                                      pipe_config->pipe_bpp),
2308                               intel_dp_max_data_rate(pipe_config->port_clock,
2309                                                      pipe_config->lane_count));
2310         }
2311         return 0;
2312 }
2313
2314 static int
2315 intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
2316                          struct drm_connector *connector,
2317                          struct intel_crtc_state *crtc_state)
2318 {
2319         const struct drm_display_info *info = &connector->display_info;
2320         const struct drm_display_mode *adjusted_mode =
2321                 &crtc_state->hw.adjusted_mode;
2322         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2323         int ret;
2324
2325         if (!drm_mode_is_420_only(info, adjusted_mode) ||
2326             !intel_dp_get_colorimetry_status(intel_dp) ||
2327             !connector->ycbcr_420_allowed)
2328                 return 0;
2329
2330         crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
2331
2332         /* YCBCR 420 output conversion needs a scaler */
2333         ret = skl_update_scaler_crtc(crtc_state);
2334         if (ret) {
2335                 DRM_DEBUG_KMS("Scaler allocation for output failed\n");
2336                 return ret;
2337         }
2338
2339         intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN);
2340
2341         return 0;
2342 }
2343
2344 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
2345                                   const struct drm_connector_state *conn_state)
2346 {
2347         const struct intel_digital_connector_state *intel_conn_state =
2348                 to_intel_digital_connector_state(conn_state);
2349         const struct drm_display_mode *adjusted_mode =
2350                 &crtc_state->hw.adjusted_mode;
2351
2352         /*
2353          * Our YCbCr output is always limited range.
2354          * crtc_state->limited_color_range only applies to RGB,
2355          * and it must never be set for YCbCr or we risk setting
2356          * some conflicting bits in PIPECONF which will mess up
2357          * the colors on the monitor.
2358          */
2359         if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
2360                 return false;
2361
2362         if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
2363                 /*
2364                  * See:
2365                  * CEA-861-E - 5.1 Default Encoding Parameters
2366                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
2367                  */
2368                 return crtc_state->pipe_bpp != 18 &&
2369                         drm_default_rgb_quant_range(adjusted_mode) ==
2370                         HDMI_QUANTIZATION_RANGE_LIMITED;
2371         } else {
2372                 return intel_conn_state->broadcast_rgb ==
2373                         INTEL_BROADCAST_RGB_LIMITED;
2374         }
2375 }
2376
2377 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
2378                                     enum port port)
2379 {
2380         if (IS_G4X(dev_priv))
2381                 return false;
2382         if (INTEL_GEN(dev_priv) < 12 && port == PORT_A)
2383                 return false;
2384
2385         return true;
2386 }
2387
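/*
 * Encoder ->compute_config() hook for DP: picks the output format
 * (forcing YCbCr 4:2:0 where the mode demands it), audio, and eDP panel
 * fitting, then computes the link configuration and M/N values (plus a
 * second set for DRRS downclocking) before giving PSR a chance to latch
 * on.
 */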
2388 int
2389 intel_dp_compute_config(struct intel_encoder *encoder,
2390                         struct intel_crtc_state *pipe_config,
2391                         struct drm_connector_state *conn_state)
2392 {
2393         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2394         struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
2395         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2396         struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
2397         enum port port = encoder->port;
2398         struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
2399         struct intel_connector *intel_connector = intel_dp->attached_connector;
2400         struct intel_digital_connector_state *intel_conn_state =
2401                 to_intel_digital_connector_state(conn_state);
2402         bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
2403                                            DP_DPCD_QUIRK_CONSTANT_N);
2404         int ret = 0, output_bpp;
2405
2406         if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
2407                 pipe_config->has_pch_encoder = true;
2408
2409         pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
2410
2411         if (lspcon->active)
2412                 lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
2413         else
2414                 ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base,
2415                                                pipe_config);
2416
2417         if (ret)
2418                 return ret;
2419
2420         pipe_config->has_drrs = false;
2421         if (!intel_dp_port_has_audio(dev_priv, port))
2422                 pipe_config->has_audio = false;
2423         else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
2424                 pipe_config->has_audio = intel_dp->has_audio;
2425         else
2426                 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
2427
2428         if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
2429                 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
2430                                        adjusted_mode);
2431
2432                 if (INTEL_GEN(dev_priv) >= 9) {
2433                         ret = skl_update_scaler_crtc(pipe_config);
2434                         if (ret)
2435                                 return ret;
2436                 }
2437
2438                 if (HAS_GMCH(dev_priv))
2439                         intel_gmch_panel_fitting(intel_crtc, pipe_config,
2440                                                  conn_state->scaling_mode);
2441                 else
2442                         intel_pch_panel_fitting(intel_crtc, pipe_config,
2443                                                 conn_state->scaling_mode);
2444         }
2445
2446         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
2447                 return -EINVAL;
2448
2449         if (HAS_GMCH(dev_priv) &&
2450             adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
2451                 return -EINVAL;
2452
2453         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
2454                 return -EINVAL;
2455
2456         if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
2457                 return -EINVAL;
2458
2459         ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
2460         if (ret < 0)
2461                 return ret;
2462
2463         pipe_config->limited_color_range =
2464                 intel_dp_limited_color_range(pipe_config, conn_state);
2465
2466         if (pipe_config->dsc.compression_enable)
2467                 output_bpp = pipe_config->dsc.compressed_bpp;
2468         else
2469                 output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp);
2470
2471         intel_link_compute_m_n(output_bpp,
2472                                pipe_config->lane_count,
2473                                adjusted_mode->crtc_clock,
2474                                pipe_config->port_clock,
2475                                &pipe_config->dp_m_n,
2476                                constant_n, pipe_config->fec_enable);
2477
2478         if (intel_connector->panel.downclock_mode != NULL &&
2479             dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
2480                 pipe_config->has_drrs = true;
2481                 intel_link_compute_m_n(output_bpp,
2482                                        pipe_config->lane_count,
2483                                        intel_connector->panel.downclock_mode->clock,
2484                                        pipe_config->port_clock,
2485                                        &pipe_config->dp_m2_n2,
2486                                        constant_n, pipe_config->fec_enable);
2487         }
2488
2489         if (!HAS_DDI(dev_priv))
2490                 intel_dp_set_clock(encoder, pipe_config);
2491
2492         intel_psr_compute_config(intel_dp, pipe_config);
2493
2494         return 0;
2495 }
2496
2497 void intel_dp_set_link_params(struct intel_dp *intel_dp,
2498                               int link_rate, u8 lane_count,
2499                               bool link_mst)
2500 {
2501         intel_dp->link_trained = false;
2502         intel_dp->link_rate = link_rate;
2503         intel_dp->lane_count = lane_count;
2504         intel_dp->link_mst = link_mst;
2505 }
2506
2507 static void intel_dp_prepare(struct intel_encoder *encoder,
2508                              const struct intel_crtc_state *pipe_config)
2509 {
2510         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2511         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2512         enum port port = encoder->port;
2513         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
2514         const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
2515
2516         intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
2517                                  pipe_config->lane_count,
2518                                  intel_crtc_has_type(pipe_config,
2519                                                      INTEL_OUTPUT_DP_MST));
2520
2521         intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
2522         intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
2523
2524         /*
2525          * There are four kinds of DP registers:
2526          *
2527          *      IBX PCH
2528          *      SNB CPU
2529          *      IVB CPU
2530          *      CPT PCH
2531          *
2532          * IBX PCH and CPU are the same for almost everything,
2533          * except that the CPU DP PLL is configured in this
2534          * register
2535          *
2536          * CPT PCH is quite different, having many bits moved
2537          * to the TRANS_DP_CTL register instead. That
2538          * configuration happens (oddly) in ilk_pch_enable
2539          */
2540
2541         /* Preserve the BIOS-computed detected bit. This is
2542          * supposed to be read-only.
2543          */
2544         intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
2545
2546         /* Handle DP bits in common between all three register formats */
2547         intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
2548         intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
2549
2550         /* Split out the IBX/CPU vs CPT settings */
2551
2552         if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
2553                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2554                         intel_dp->DP |= DP_SYNC_HS_HIGH;
2555                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2556                         intel_dp->DP |= DP_SYNC_VS_HIGH;
2557                 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2558
2559                 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2560                         intel_dp->DP |= DP_ENHANCED_FRAMING;
2561
2562                 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
2563         } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
2564                 u32 trans_dp;
2565
2566                 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
2567
2568                 trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
2569                 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2570                         trans_dp |= TRANS_DP_ENH_FRAMING;
2571                 else
2572                         trans_dp &= ~TRANS_DP_ENH_FRAMING;
2573                 intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
2574         } else {
2575                 if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
2576                         intel_dp->DP |= DP_COLOR_RANGE_16_235;
2577
2578                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2579                         intel_dp->DP |= DP_SYNC_HS_HIGH;
2580                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2581                         intel_dp->DP |= DP_SYNC_VS_HIGH;
2582                 intel_dp->DP |= DP_LINK_TRAIN_OFF;
2583
2584                 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
2585                         intel_dp->DP |= DP_ENHANCED_FRAMING;
2586
2587                 if (IS_CHERRYVIEW(dev_priv))
2588                         intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
2589                 else
2590                         intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
2591         }
2592 }
2593
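/*
 * Panel power sequencer state targets for wait_panel_status(): each
 * mask/value pair selects the PP_STATUS bits to poll when waiting for
 * the panel to be fully on, fully off, or through a power cycle.
 */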
2594 #define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
2595 #define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
2596
2597 #define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
2598 #define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)
2599
2600 #define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
2601 #define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
2602
2603 static void intel_pps_verify_state(struct intel_dp *intel_dp);
2604
2605 static void wait_panel_status(struct intel_dp *intel_dp,
2606                               u32 mask,
2607                               u32 value)
2608 {
2609         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2610         i915_reg_t pp_stat_reg, pp_ctrl_reg;
2611
2612         lockdep_assert_held(&dev_priv->pps_mutex);
2613
2614         intel_pps_verify_state(intel_dp);
2615
2616         pp_stat_reg = _pp_stat_reg(intel_dp);
2617         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2618
2619         drm_dbg_kms(&dev_priv->drm,
2620                     "mask %08x value %08x status %08x control %08x\n",
2621                     mask, value,
2622                     intel_de_read(dev_priv, pp_stat_reg),
2623                     intel_de_read(dev_priv, pp_ctrl_reg));
2624
2625         if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
2626                                        mask, value, 5000))
2627                 drm_err(&dev_priv->drm,
2628                         "Panel status timeout: status %08x control %08x\n",
2629                         intel_de_read(dev_priv, pp_stat_reg),
2630                         intel_de_read(dev_priv, pp_ctrl_reg));
2631
2632         drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
2633 }
2634
2635 static void wait_panel_on(struct intel_dp *intel_dp)
2636 {
2637         DRM_DEBUG_KMS("Wait for panel power on\n");
2638         wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
2639 }
2640
2641 static void wait_panel_off(struct intel_dp *intel_dp)
2642 {
2643         DRM_DEBUG_KMS("Wait for panel power off time\n");
2644         wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
2645 }
2646
2647 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
2648 {
2649         ktime_t panel_power_on_time;
2650         s64 panel_power_off_duration;
2651
2652         DRM_DEBUG_KMS("Wait for panel power cycle\n");
2653
2654         /* Take the difference of the current time and the panel power off
2655          * time, and then make the panel wait for t11_t12 if needed. */
2656         panel_power_on_time = ktime_get_boottime();
2657         panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
2658
2659         /* When disabling the VDD override bit was the last thing we did, we
2660          * have to do the wait manually. */
2661         if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
2662                 wait_remaining_ms_from_jiffies(jiffies,
2663                                        intel_dp->panel_power_cycle_delay - panel_power_off_duration);
2664
2665         wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
2666 }
2667
2668 static void wait_backlight_on(struct intel_dp *intel_dp)
2669 {
2670         wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
2671                                        intel_dp->backlight_on_delay);
2672 }
2673
2674 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
2675 {
2676         wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
2677                                        intel_dp->backlight_off_delay);
2678 }
2679
2680 /* Read the current pp_control value, unlocking the register if it
2681  * is locked
2682  */
2683
2684 static  u32 ilk_get_pp_control(struct intel_dp *intel_dp)
2685 {
2686         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2687         u32 control;
2688
2689         lockdep_assert_held(&dev_priv->pps_mutex);
2690
2691         control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
2692         if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
2693                         (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
2694                 control &= ~PANEL_UNLOCK_MASK;
2695                 control |= PANEL_UNLOCK_REGS;
2696         }
2697         return control;
2698 }
2699
2700 /*
2701  * Must be paired with edp_panel_vdd_off().
2702  * Must hold pps_mutex around the whole on/off sequence.
2703  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2704  */
2705 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
2706 {
2707         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2708         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2709         u32 pp;
2710         i915_reg_t pp_stat_reg, pp_ctrl_reg;
2711         bool need_to_disable = !intel_dp->want_panel_vdd;
2712
2713         lockdep_assert_held(&dev_priv->pps_mutex);
2714
2715         if (!intel_dp_is_edp(intel_dp))
2716                 return false;
2717
2718         cancel_delayed_work(&intel_dp->panel_vdd_work);
2719         intel_dp->want_panel_vdd = true;
2720
2721         if (edp_have_panel_vdd(intel_dp))
2722                 return need_to_disable;
2723
2724         intel_display_power_get(dev_priv,
2725                                 intel_aux_power_domain(intel_dig_port));
2726
2727         drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
2728                     intel_dig_port->base.base.base.id,
2729                     intel_dig_port->base.base.name);
2730
2731         if (!edp_have_panel_power(intel_dp))
2732                 wait_panel_power_cycle(intel_dp);
2733
2734         pp = ilk_get_pp_control(intel_dp);
2735         pp |= EDP_FORCE_VDD;
2736
2737         pp_stat_reg = _pp_stat_reg(intel_dp);
2738         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2739
2740         intel_de_write(dev_priv, pp_ctrl_reg, pp);
2741         intel_de_posting_read(dev_priv, pp_ctrl_reg);
2742         drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2743                     intel_de_read(dev_priv, pp_stat_reg),
2744                     intel_de_read(dev_priv, pp_ctrl_reg));
2745         /*
2746          * If the panel wasn't on, delay before accessing aux channel
2747          */
2748         if (!edp_have_panel_power(intel_dp)) {
2749                 drm_dbg_kms(&dev_priv->drm,
2750                             "[ENCODER:%d:%s] panel power wasn't enabled\n",
2751                             intel_dig_port->base.base.base.id,
2752                             intel_dig_port->base.base.name);
2753                 msleep(intel_dp->panel_power_up_delay);
2754         }
2755
2756         return need_to_disable;
2757 }
2758
2759 /*
2760  * Must be paired with intel_edp_panel_vdd_off() or
2761  * intel_edp_panel_off().
2762  * Nested calls to these functions are not allowed since
2763  * we drop the lock. Caller must use some higher level
2764  * locking to prevent nested calls from other threads.
2765  */
2766 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2767 {
2768         intel_wakeref_t wakeref;
2769         bool vdd;
2770
2771         if (!intel_dp_is_edp(intel_dp))
2772                 return;
2773
2774         vdd = false;
2775         with_pps_lock(intel_dp, wakeref)
2776                 vdd = edp_panel_vdd_on(intel_dp);
2777         I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
2778                         dp_to_dig_port(intel_dp)->base.base.base.id,
2779                         dp_to_dig_port(intel_dp)->base.base.name);
2780 }
2781
2782 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
2783 {
2784         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2785         struct intel_digital_port *intel_dig_port =
2786                 dp_to_dig_port(intel_dp);
2787         u32 pp;
2788         i915_reg_t pp_stat_reg, pp_ctrl_reg;
2789
2790         lockdep_assert_held(&dev_priv->pps_mutex);
2791
2792         drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd);
2793
2794         if (!edp_have_panel_vdd(intel_dp))
2795                 return;
2796
2797         drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
2798                     intel_dig_port->base.base.base.id,
2799                     intel_dig_port->base.base.name);
2800
2801         pp = ilk_get_pp_control(intel_dp);
2802         pp &= ~EDP_FORCE_VDD;
2803
2804         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2805         pp_stat_reg = _pp_stat_reg(intel_dp);
2806
2807         intel_de_write(dev_priv, pp_ctrl_reg, pp);
2808         intel_de_posting_read(dev_priv, pp_ctrl_reg);
2809
2810         /* Make sure sequencer is idle before allowing subsequent activity */
2811         drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2812                     intel_de_read(dev_priv, pp_stat_reg),
2813                     intel_de_read(dev_priv, pp_ctrl_reg));
2814
2815         if ((pp & PANEL_POWER_ON) == 0)
2816                 intel_dp->panel_power_off_time = ktime_get_boottime();
2817
2818         intel_display_power_put_unchecked(dev_priv,
2819                                           intel_aux_power_domain(intel_dig_port));
2820 }
2821
2822 static void edp_panel_vdd_work(struct work_struct *__work)
2823 {
2824         struct intel_dp *intel_dp =
2825                 container_of(to_delayed_work(__work),
2826                              struct intel_dp, panel_vdd_work);
2827         intel_wakeref_t wakeref;
2828
2829         with_pps_lock(intel_dp, wakeref) {
2830                 if (!intel_dp->want_panel_vdd)
2831                         edp_panel_vdd_off_sync(intel_dp);
2832         }
2833 }
2834
2835 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2836 {
2837         unsigned long delay;
2838
2839         /*
2840          * Queue the timer to fire a long time from now (relative to the power
2841          * down delay) to keep the panel power up across a sequence of
2842          * operations.
2843          */
2844         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2845         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2846 }
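
/*
 * Worked example for the delay above: with a hypothetical 500 ms
 * panel_power_cycle_delay, the VDD off work fires roughly 2.5 s after
 * the last VDD user finished. Illustration only.
 */
static void __maybe_unused example_vdd_off_delay(struct intel_dp *intel_dp)
{
        /* 5 * 500 ms => VDD kept up ~2500 ms across bursts of AUX traffic */
        schedule_delayed_work(&intel_dp->panel_vdd_work,
                              msecs_to_jiffies(5 * 500));
}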
2847
2848 /*
2849  * Must be paired with edp_panel_vdd_on().
2850  * Must hold pps_mutex around the whole on/off sequence.
2851  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2852  */
2853 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2854 {
2855         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2856
2857         lockdep_assert_held(&dev_priv->pps_mutex);
2858
2859         if (!intel_dp_is_edp(intel_dp))
2860                 return;
2861
2862         I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
2863                         dp_to_dig_port(intel_dp)->base.base.base.id,
2864                         dp_to_dig_port(intel_dp)->base.base.name);
2865
2866         intel_dp->want_panel_vdd = false;
2867
2868         if (sync)
2869                 edp_panel_vdd_off_sync(intel_dp);
2870         else
2871                 edp_panel_vdd_schedule_off(intel_dp);
2872 }
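
/*
 * Pairing sketch for edp_panel_vdd_on()/edp_panel_vdd_off(), assuming
 * the caller already holds pps_mutex (e.g. via with_pps_lock()) as the
 * comments above require. Hypothetical helper mirroring the nesting
 * convention; illustration only.
 */
static void __maybe_unused example_vdd_bracket(struct intel_dp *intel_dp)
{
        bool need_to_disable = edp_panel_vdd_on(intel_dp);

        /* ... AUX/DPCD traffic that needs panel VDD goes here ... */

        /* only the outermost user schedules VDD back off */
        if (need_to_disable)
                edp_panel_vdd_off(intel_dp, false);
}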
2873
2874 static void edp_panel_on(struct intel_dp *intel_dp)
2875 {
2876         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2877         u32 pp;
2878         i915_reg_t pp_ctrl_reg;
2879
2880         lockdep_assert_held(&dev_priv->pps_mutex);
2881
2882         if (!intel_dp_is_edp(intel_dp))
2883                 return;
2884
2885         drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
2886                     dp_to_dig_port(intel_dp)->base.base.base.id,
2887                     dp_to_dig_port(intel_dp)->base.base.name);
2888
2889         if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
2890                      "[ENCODER:%d:%s] panel power already on\n",
2891                      dp_to_dig_port(intel_dp)->base.base.base.id,
2892                      dp_to_dig_port(intel_dp)->base.base.name))
2893                 return;
2894
2895         wait_panel_power_cycle(intel_dp);
2896
2897         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2898         pp = ilk_get_pp_control(intel_dp);
2899         if (IS_GEN(dev_priv, 5)) {
2900                 /* ILK workaround: disable reset around power sequence */
2901                 pp &= ~PANEL_POWER_RESET;
2902                 intel_de_write(dev_priv, pp_ctrl_reg, pp);
2903                 intel_de_posting_read(dev_priv, pp_ctrl_reg);
2904         }
2905
2906         pp |= PANEL_POWER_ON;
2907         if (!IS_GEN(dev_priv, 5))
2908                 pp |= PANEL_POWER_RESET;
2909
2910         intel_de_write(dev_priv, pp_ctrl_reg, pp);
2911         intel_de_posting_read(dev_priv, pp_ctrl_reg);
2912
2913         wait_panel_on(intel_dp);
2914         intel_dp->last_power_on = jiffies;
2915
2916         if (IS_GEN(dev_priv, 5)) {
2917                 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2918                 intel_de_write(dev_priv, pp_ctrl_reg, pp);
2919                 intel_de_posting_read(dev_priv, pp_ctrl_reg);
2920         }
2921 }
2922
2923 void intel_edp_panel_on(struct intel_dp *intel_dp)
2924 {
2925         intel_wakeref_t wakeref;
2926
2927         if (!intel_dp_is_edp(intel_dp))
2928                 return;
2929
2930         with_pps_lock(intel_dp, wakeref)
2931                 edp_panel_on(intel_dp);
2932 }
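
/*
 * The with_pps_lock() pattern used by all the wrappers here: take a
 * display power reference plus pps_mutex for the body, then drop both
 * on exit. Hypothetical op; illustration only.
 */
static void __maybe_unused example_pps_locked_op(struct intel_dp *intel_dp)
{
        intel_wakeref_t wakeref;

        with_pps_lock(intel_dp, wakeref) {
                /* PPS registers can be touched safely in here */
        }
}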
2933
2935 static void edp_panel_off(struct intel_dp *intel_dp)
2936 {
2937         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2938         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2939         u32 pp;
2940         i915_reg_t pp_ctrl_reg;
2941
2942         lockdep_assert_held(&dev_priv->pps_mutex);
2943
2944         if (!intel_dp_is_edp(intel_dp))
2945                 return;
2946
2947         drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
2948                     dig_port->base.base.base.id, dig_port->base.base.name);
2949
2950         drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd,
2951                  "Need [ENCODER:%d:%s] VDD to turn off panel\n",
2952                  dig_port->base.base.base.id, dig_port->base.base.name);
2953
2954         pp = ilk_get_pp_control(intel_dp);
2955         /* We need to switch off both panel power _and_ the VDD override,
2956          * as otherwise some panels get very unhappy and cease to work. */
2957         pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2958                 EDP_BLC_ENABLE);
2959
2960         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2961
2962         intel_dp->want_panel_vdd = false;
2963
2964         intel_de_write(dev_priv, pp_ctrl_reg, pp);
2965         intel_de_posting_read(dev_priv, pp_ctrl_reg);
2966
2967         wait_panel_off(intel_dp);
2968         intel_dp->panel_power_off_time = ktime_get_boottime();
2969
2970         /* We got a reference when we enabled the VDD. */
2971         intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
2972 }
2973
2974 void intel_edp_panel_off(struct intel_dp *intel_dp)
2975 {
2976         intel_wakeref_t wakeref;
2977
2978         if (!intel_dp_is_edp(intel_dp))
2979                 return;
2980
2981         with_pps_lock(intel_dp, wakeref)
2982                 edp_panel_off(intel_dp);
2983 }
2984
2985 /* Enable backlight in the panel power control. */
2986 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2987 {
2988         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2989         intel_wakeref_t wakeref;
2990
2991         /*
2992          * If we enable the backlight right away following a panel power
2993          * on, we may see slight flicker as the panel syncs with the eDP
2994          * link.  So delay a bit to make sure the image is solid before
2995          * allowing it to appear.
2996          */
2997         wait_backlight_on(intel_dp);
2998
2999         with_pps_lock(intel_dp, wakeref) {
3000                 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
3001                 u32 pp;
3002
3003                 pp = ilk_get_pp_control(intel_dp);
3004                 pp |= EDP_BLC_ENABLE;
3005
3006                 intel_de_write(dev_priv, pp_ctrl_reg, pp);
3007                 intel_de_posting_read(dev_priv, pp_ctrl_reg);
3008         }
3009 }
3010
3011 /* Enable backlight PWM and backlight PP control. */
3012 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
3013                             const struct drm_connector_state *conn_state)
3014 {
3015         struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
3016
3017         if (!intel_dp_is_edp(intel_dp))
3018                 return;
3019
3020         DRM_DEBUG_KMS("\n");
3021
3022         intel_panel_enable_backlight(crtc_state, conn_state);
3023         _intel_edp_backlight_on(intel_dp);
3024 }
3025
3026 /* Disable backlight in the panel power control. */
3027 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
3028 {
3029         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3030         intel_wakeref_t wakeref;
3031
3032         if (!intel_dp_is_edp(intel_dp))
3033                 return;
3034
3035         with_pps_lock(intel_dp, wakeref) {
3036                 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
3037                 u32 pp;
3038
3039                 pp = ilk_get_pp_control(intel_dp);
3040                 pp &= ~EDP_BLC_ENABLE;
3041
3042                 intel_de_write(dev_priv, pp_ctrl_reg, pp);
3043                 intel_de_posting_read(dev_priv, pp_ctrl_reg);
3044         }
3045
3046         intel_dp->last_backlight_off = jiffies;
3047         edp_wait_backlight_off(intel_dp);
3048 }
3049
3050 /* Disable backlight PP control and backlight PWM. */
3051 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
3052 {
3053         struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
3054
3055         if (!intel_dp_is_edp(intel_dp))
3056                 return;
3057
3058         DRM_DEBUG_KMS("\n");
3059
3060         _intel_edp_backlight_off(intel_dp);
3061         intel_panel_disable_backlight(old_conn_state);
3062 }
3063
3064 /*
3065  * Hook for controlling the panel power control backlight through the bl_power
3066  * sysfs attribute. Take care to handle multiple calls.
3067  */
3068 static void intel_edp_backlight_power(struct intel_connector *connector,
3069                                       bool enable)
3070 {
3071         struct intel_dp *intel_dp = intel_attached_dp(connector);
3072         intel_wakeref_t wakeref;
3073         bool is_enabled;
3074
3075         is_enabled = false;
3076         with_pps_lock(intel_dp, wakeref)
3077                 is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
3078         if (is_enabled == enable)
3079                 return;
3080
3081         DRM_DEBUG_KMS("panel power control backlight %s\n",
3082                       enable ? "enable" : "disable");
3083
3084         if (enable)
3085                 _intel_edp_backlight_on(intel_dp);
3086         else
3087                 _intel_edp_backlight_off(intel_dp);
3088 }
3089
3090 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
3091 {
3092         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3093         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
3094         bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;
3095
3096         I915_STATE_WARN(cur_state != state,
3097                         "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
3098                         dig_port->base.base.base.id, dig_port->base.base.name,
3099                         onoff(state), onoff(cur_state));
3100 }
3101 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
3102
3103 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
3104 {
3105         bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;
3106
3107         I915_STATE_WARN(cur_state != state,
3108                         "eDP PLL state assertion failure (expected %s, current %s)\n",
3109                         onoff(state), onoff(cur_state));
3110 }
3111 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
3112 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
3113
3114 static void ilk_edp_pll_on(struct intel_dp *intel_dp,
3115                            const struct intel_crtc_state *pipe_config)
3116 {
3117         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
3118         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3119
3120         assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
3121         assert_dp_port_disabled(intel_dp);
3122         assert_edp_pll_disabled(dev_priv);
3123
3124         drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n",
3125                     pipe_config->port_clock);
3126
3127         intel_dp->DP &= ~DP_PLL_FREQ_MASK;
3128
3129         if (pipe_config->port_clock == 162000)
3130                 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
3131         else
3132                 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
3133
3134         intel_de_write(dev_priv, DP_A, intel_dp->DP);
3135         intel_de_posting_read(dev_priv, DP_A);
3136         udelay(500);
3137
3138         /*
3139          * [DevILK] Work around required when enabling DP PLL
3140          * while a pipe is enabled going to FDI:
3141          * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
3142          * 2. Program DP PLL enable
3143          */
3144         if (IS_GEN(dev_priv, 5))
3145                 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
3146
3147         intel_dp->DP |= DP_PLL_ENABLE;
3148
3149         intel_de_write(dev_priv, DP_A, intel_dp->DP);
3150         intel_de_posting_read(dev_priv, DP_A);
3151         udelay(200);
3152 }
3153
3154 static void ilk_edp_pll_off(struct intel_dp *intel_dp,
3155                             const struct intel_crtc_state *old_crtc_state)
3156 {
3157         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
3158         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3159
3160         assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
3161         assert_dp_port_disabled(intel_dp);
3162         assert_edp_pll_enabled(dev_priv);
3163
3164         drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n");
3165
3166         intel_dp->DP &= ~DP_PLL_ENABLE;
3167
3168         intel_de_write(dev_priv, DP_A, intel_dp->DP);
3169         intel_de_posting_read(dev_priv, DP_A);
3170         udelay(200);
3171 }
3172
3173 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
3174 {
3175         /*
3176          * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
3177          * be capable of signalling downstream hpd with a long pulse.
3178          * Whether or not that means D3 is safe to use is not clear,
3179          * but let's assume so until proven otherwise.
3180          *
3181          * FIXME should really check all downstream ports...
3182          */
3183         return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
3184                 drm_dp_is_branch(intel_dp->dpcd) &&
3185                 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
3186 }
3187
3188 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
3189                                            const struct intel_crtc_state *crtc_state,
3190                                            bool enable)
3191 {
3192         int ret;
3193
3194         if (!crtc_state->dsc.compression_enable)
3195                 return;
3196
3197         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
3198                                  enable ? DP_DECOMPRESSION_EN : 0);
3199         if (ret < 0)
3200                 DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
3201                               enable ? "enable" : "disable");
3202 }
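
/*
 * drm_dp_dpcd_writeb() returns the number of bytes transferred (1 on
 * success) or a negative errno, which is why the helpers here test
 * 'ret < 0' or 'ret != 1'. A sketch normalizing that to 0/-errno;
 * hypothetical helper, illustration only.
 */
static int __maybe_unused example_dpcd_writeb(struct intel_dp *intel_dp,
                                              unsigned int offset, u8 value)
{
        int ret = drm_dp_dpcd_writeb(&intel_dp->aux, offset, value);

        if (ret < 0)
                return ret;            /* AUX transaction failed outright */

        return ret == 1 ? 0 : -EIO;    /* short write */
}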
3203
3204 /* If the sink supports it, try to set the power state appropriately */
3205 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
3206 {
3207         int ret, i;
3208
3209         /* Should have a valid DPCD by this point */
3210         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
3211                 return;
3212
3213         if (mode != DRM_MODE_DPMS_ON) {
3214                 if (downstream_hpd_needs_d0(intel_dp))
3215                         return;
3216
3217                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
3218                                          DP_SET_POWER_D3);
3219         } else {
3220                 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
3221
3222                 /*
3223                  * When turning on, we retry the write a few times, with a
3224                  * 1 ms sleep in between, to give the sink time to wake up.
3225                  */
3226                 for (i = 0; i < 3; i++) {
3227                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
3228                                                  DP_SET_POWER_D0);
3229                         if (ret == 1)
3230                                 break;
3231                         msleep(1);
3232                 }
3233
3234                 if (ret == 1 && lspcon->active)
3235                         lspcon_wait_pcon_mode(lspcon);
3236         }
3237
3238         if (ret != 1)
3239                 DRM_DEBUG_KMS("failed to %s sink power state\n",
3240                               mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
3241 }
3242
3243 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
3244                                  enum port port, enum pipe *pipe)
3245 {
3246         enum pipe p;
3247
3248         for_each_pipe(dev_priv, p) {
3249                 u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p));
3250
3251                 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
3252                         *pipe = p;
3253                         return true;
3254                 }
3255         }
3256
3257         drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n",
3258                     port_name(port));
3259
3260         /* must initialize pipe to something for the asserts */
3261         *pipe = PIPE_A;
3262
3263         return false;
3264 }
3265
3266 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
3267                            i915_reg_t dp_reg, enum port port,
3268                            enum pipe *pipe)
3269 {
3270         bool ret;
3271         u32 val;
3272
3273         val = intel_de_read(dev_priv, dp_reg);
3274
3275         ret = val & DP_PORT_EN;
3276
3277         /* asserts want to know the pipe even if the port is disabled */
3278         if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3279                 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
3280         else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3281                 ret &= cpt_dp_port_selected(dev_priv, port, pipe);
3282         else if (IS_CHERRYVIEW(dev_priv))
3283                 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
3284         else
3285                 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
3286
3287         return ret;
3288 }
3289
3290 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
3291                                   enum pipe *pipe)
3292 {
3293         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3294         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3295         intel_wakeref_t wakeref;
3296         bool ret;
3297
3298         wakeref = intel_display_power_get_if_enabled(dev_priv,
3299                                                      encoder->power_domain);
3300         if (!wakeref)
3301                 return false;
3302
3303         ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
3304                                     encoder->port, pipe);
3305
3306         intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
3307
3308         return ret;
3309 }
3310
3311 static void intel_dp_get_config(struct intel_encoder *encoder,
3312                                 struct intel_crtc_state *pipe_config)
3313 {
3314         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3315         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3316         u32 tmp, flags = 0;
3317         enum port port = encoder->port;
3318         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
3319
3320         if (encoder->type == INTEL_OUTPUT_EDP)
3321                 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
3322         else
3323                 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
3324
3325         tmp = intel_de_read(dev_priv, intel_dp->output_reg);
3326
3327         pipe_config->has_audio = (tmp & DP_AUDIO_OUTPUT_ENABLE) && port != PORT_A;
3328
3329         if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
3330                 u32 trans_dp = intel_de_read(dev_priv,
3331                                              TRANS_DP_CTL(crtc->pipe));
3332
3333                 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
3334                         flags |= DRM_MODE_FLAG_PHSYNC;
3335                 else
3336                         flags |= DRM_MODE_FLAG_NHSYNC;
3337
3338                 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
3339                         flags |= DRM_MODE_FLAG_PVSYNC;
3340                 else
3341                         flags |= DRM_MODE_FLAG_NVSYNC;
3342         } else {
3343                 if (tmp & DP_SYNC_HS_HIGH)
3344                         flags |= DRM_MODE_FLAG_PHSYNC;
3345                 else
3346                         flags |= DRM_MODE_FLAG_NHSYNC;
3347
3348                 if (tmp & DP_SYNC_VS_HIGH)
3349                         flags |= DRM_MODE_FLAG_PVSYNC;
3350                 else
3351                         flags |= DRM_MODE_FLAG_NVSYNC;
3352         }
3353
3354         pipe_config->hw.adjusted_mode.flags |= flags;
3355
3356         if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
3357                 pipe_config->limited_color_range = true;
3358
3359         pipe_config->lane_count =
3360                 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
3361
3362         intel_dp_get_m_n(crtc, pipe_config);
3363
3364         if (port == PORT_A) {
3365                 if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
3366                         pipe_config->port_clock = 162000;
3367                 else
3368                         pipe_config->port_clock = 270000;
3369         }
3370
3371         pipe_config->hw.adjusted_mode.crtc_clock =
3372                 intel_dotclock_calculate(pipe_config->port_clock,
3373                                          &pipe_config->dp_m_n);
3374
3375         if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
3376             pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
3377                 /*
3378                  * This is a big fat ugly hack.
3379                  *
3380                  * Some machines in UEFI boot mode provide us a VBT that has 18
3381                  * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
3382                  * unknown we fail to light up. Yet the same BIOS boots up with
3383                  * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
3384                  * max, not what it tells us to use.
3385                  *
3386                  * Note: This will still be broken if the eDP panel is not lit
3387                  * up by the BIOS, and thus we can't get the mode at module
3388                  * load.
3389                  */
3390                 drm_dbg_kms(&dev_priv->drm,
3391                             "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
3392                             pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
3393                 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
3394         }
3395 }
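
/*
 * Worked example for the dotclock recovery above: the pixel clock is
 * link_m * port_clock / link_n. With hypothetical values link M/N =
 * 22222/33333 on a 270000 kHz link, the pipe runs at ~180000 kHz.
 * Illustration only.
 */
static int __maybe_unused example_dotclock(void)
{
        struct intel_link_m_n m_n = { .link_m = 22222, .link_n = 33333 };

        /* 22222 * 270000 / 33333 ~= 180000 kHz */
        return intel_dotclock_calculate(270000, &m_n);
}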
3396
3397 static void intel_disable_dp(struct intel_encoder *encoder,
3398                              const struct intel_crtc_state *old_crtc_state,
3399                              const struct drm_connector_state *old_conn_state)
3400 {
3401         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3402
3403         intel_dp->link_trained = false;
3404
3405         if (old_crtc_state->has_audio)
3406                 intel_audio_codec_disable(encoder,
3407                                           old_crtc_state, old_conn_state);
3408
3409         /* Make sure the panel is off before trying to change the mode. But also
3410          * ensure that we have vdd while we switch off the panel. */
3411         intel_edp_panel_vdd_on(intel_dp);
3412         intel_edp_backlight_off(old_conn_state);
3413         intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
3414         intel_edp_panel_off(intel_dp);
3415 }
3416
3417 static void g4x_disable_dp(struct intel_encoder *encoder,
3418                            const struct intel_crtc_state *old_crtc_state,
3419                            const struct drm_connector_state *old_conn_state)
3420 {
3421         intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3422 }
3423
3424 static void vlv_disable_dp(struct intel_encoder *encoder,
3425                            const struct intel_crtc_state *old_crtc_state,
3426                            const struct drm_connector_state *old_conn_state)
3427 {
3428         intel_disable_dp(encoder, old_crtc_state, old_conn_state);
3429 }
3430
3431 static void g4x_post_disable_dp(struct intel_encoder *encoder,
3432                                 const struct intel_crtc_state *old_crtc_state,
3433                                 const struct drm_connector_state *old_conn_state)
3434 {
3435         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3436         enum port port = encoder->port;
3437
3438         /*
3439          * Bspec does not list a specific disable sequence for g4x DP.
3440          * Follow the ilk+ sequence (disable pipe before the port) for
3441          * g4x DP as it does not suffer from underruns like the normal
3442          * g4x modeset sequence (disable pipe after the port).
3443          */
3444         intel_dp_link_down(encoder, old_crtc_state);
3445
3446         /* Only ilk+ has port A */
3447         if (port == PORT_A)
3448                 ilk_edp_pll_off(intel_dp, old_crtc_state);
3449 }
3450
3451 static void vlv_post_disable_dp(struct intel_encoder *encoder,
3452                                 const struct intel_crtc_state *old_crtc_state,
3453                                 const struct drm_connector_state *old_conn_state)
3454 {
3455         intel_dp_link_down(encoder, old_crtc_state);
3456 }
3457
3458 static void chv_post_disable_dp(struct intel_encoder *encoder,
3459                                 const struct intel_crtc_state *old_crtc_state,
3460                                 const struct drm_connector_state *old_conn_state)
3461 {
3462         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3463
3464         intel_dp_link_down(encoder, old_crtc_state);
3465
3466         vlv_dpio_get(dev_priv);
3467
3468         /* Assert data lane reset */
3469         chv_data_lane_soft_reset(encoder, old_crtc_state, true);
3470
3471         vlv_dpio_put(dev_priv);
3472 }
3473
3474 static void
3475 _intel_dp_set_link_train(struct intel_dp *intel_dp,
3476                          u32 *DP,
3477                          u8 dp_train_pat)
3478 {
3479         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3480         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3481         enum port port = intel_dig_port->base.port;
3482         u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
3483
3484         if (dp_train_pat & train_pat_mask)
3485                 drm_dbg_kms(&dev_priv->drm,
3486                             "Using DP training pattern TPS%d\n",
3487                             dp_train_pat & train_pat_mask);
3488
3489         if (HAS_DDI(dev_priv)) {
3490                 u32 temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
3491
3492                 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
3493                         temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
3494                 else
3495                         temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
3496
3497                 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3498                 switch (dp_train_pat & train_pat_mask) {
3499                 case DP_TRAINING_PATTERN_DISABLE:
3500                         temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
3501                         break;
3502
3503                 case DP_TRAINING_PATTERN_1:
3504                         temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
3505                         break;
3506                 case DP_TRAINING_PATTERN_2:
3507                         temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
3508                         break;
3509                 case DP_TRAINING_PATTERN_3:
3510                         temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
3511                         break;
3512                 case DP_TRAINING_PATTERN_4:
3513                         temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
3514                         break;
3515                 }
3516                 intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp);
3517
3518         } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
3519                    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
3520                 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
3521
3522                 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3523                 case DP_TRAINING_PATTERN_DISABLE:
3524                         *DP |= DP_LINK_TRAIN_OFF_CPT;
3525                         break;
3526                 case DP_TRAINING_PATTERN_1:
3527                         *DP |= DP_LINK_TRAIN_PAT_1_CPT;
3528                         break;
3529                 case DP_TRAINING_PATTERN_2:
3530                         *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3531                         break;
3532                 case DP_TRAINING_PATTERN_3:
3533                         drm_dbg_kms(&dev_priv->drm,
3534                                     "TPS3 not supported, using TPS2 instead\n");
3535                         *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3536                         break;
3537                 }
3538
3539         } else {
3540                 *DP &= ~DP_LINK_TRAIN_MASK;
3541
3542                 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3543                 case DP_TRAINING_PATTERN_DISABLE:
3544                         *DP |= DP_LINK_TRAIN_OFF;
3545                         break;
3546                 case DP_TRAINING_PATTERN_1:
3547                         *DP |= DP_LINK_TRAIN_PAT_1;
3548                         break;
3549                 case DP_TRAINING_PATTERN_2:
3550                         *DP |= DP_LINK_TRAIN_PAT_2;
3551                         break;
3552                 case DP_TRAINING_PATTERN_3:
3553                         drm_dbg_kms(&dev_priv->drm,
3554                                     "TPS3 not supported, using TPS2 instead\n");
3555                         *DP |= DP_LINK_TRAIN_PAT_2;
3556                         break;
3557                 }
3558         }
3559 }
3560
3561 static void intel_dp_enable_port(struct intel_dp *intel_dp,
3562                                  const struct intel_crtc_state *old_crtc_state)
3563 {
3564         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3565
3566         /* enable with pattern 1 (as per spec) */
3567
3568         intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);
3569
3570         /*
3571          * Magic for VLV/CHV. We _must_ first set up the register
3572          * without actually enabling the port, and then do another
3573          * write to enable the port. Otherwise link training will
3574          * fail when the power sequencer is freshly used for this port.
3575          */
3576         intel_dp->DP |= DP_PORT_EN;
3577         if (old_crtc_state->has_audio)
3578                 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
3579
3580         intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
3581         intel_de_posting_read(dev_priv, intel_dp->output_reg);
3582 }
3583
3584 static void intel_enable_dp(struct intel_encoder *encoder,
3585                             const struct intel_crtc_state *pipe_config,
3586                             const struct drm_connector_state *conn_state)
3587 {
3588         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3589         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3590         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
3591         u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
3592         enum pipe pipe = crtc->pipe;
3593         intel_wakeref_t wakeref;
3594
3595         if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
3596                 return;
3597
3598         with_pps_lock(intel_dp, wakeref) {
3599                 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3600                         vlv_init_panel_power_sequencer(encoder, pipe_config);
3601
3602                 intel_dp_enable_port(intel_dp, pipe_config);
3603
3604                 edp_panel_vdd_on(intel_dp);
3605                 edp_panel_on(intel_dp);
3606                 edp_panel_vdd_off(intel_dp, true);
3607         }
3608
3609         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3610                 unsigned int lane_mask = 0x0;
3611
3612                 if (IS_CHERRYVIEW(dev_priv))
3613                         lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
3614
3615                 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
3616                                     lane_mask);
3617         }
3618
3619         intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
3620         intel_dp_start_link_train(intel_dp);
3621         intel_dp_stop_link_train(intel_dp);
3622
3623         if (pipe_config->has_audio) {
3624                 drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n",
3625                         pipe_name(pipe));
3626                 intel_audio_codec_enable(encoder, pipe_config, conn_state);
3627         }
3628 }
3629
3630 static void g4x_enable_dp(struct intel_encoder *encoder,
3631                           const struct intel_crtc_state *pipe_config,
3632                           const struct drm_connector_state *conn_state)
3633 {
3634         intel_enable_dp(encoder, pipe_config, conn_state);
3635         intel_edp_backlight_on(pipe_config, conn_state);
3636 }
3637
3638 static void vlv_enable_dp(struct intel_encoder *encoder,
3639                           const struct intel_crtc_state *pipe_config,
3640                           const struct drm_connector_state *conn_state)
3641 {
3642         intel_edp_backlight_on(pipe_config, conn_state);
3643 }
3644
3645 static void g4x_pre_enable_dp(struct intel_encoder *encoder,
3646                               const struct intel_crtc_state *pipe_config,
3647                               const struct drm_connector_state *conn_state)
3648 {
3649         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3650         enum port port = encoder->port;
3651
3652         intel_dp_prepare(encoder, pipe_config);
3653
3654         /* Only ilk+ has port A */
3655         if (port == PORT_A)
3656                 ilk_edp_pll_on(intel_dp, pipe_config);
3657 }
3658
3659 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
3660 {
3661         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3662         struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
3663         enum pipe pipe = intel_dp->pps_pipe;
3664         i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
3665
3666         drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
3667
3668         if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
3669                 return;
3670
3671         edp_panel_vdd_off_sync(intel_dp);
3672
3673         /*
3674          * VLV seems to get confused when multiple power sequencers
3675          * have the same port selected (even if only one has power/vdd
3676          * enabled). The failure manifests as vlv_wait_port_ready() failing.
3677          * CHV, on the other hand, doesn't seem to mind having the same port
3678          * selected in multiple power sequencers, but let's always clear the
3679          * port select when logically disconnecting a power sequencer
3680          * from a port.
3681          */
3682         drm_dbg_kms(&dev_priv->drm,
3683                     "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
3684                     pipe_name(pipe), intel_dig_port->base.base.base.id,
3685                     intel_dig_port->base.base.name);
3686         intel_de_write(dev_priv, pp_on_reg, 0);
3687         intel_de_posting_read(dev_priv, pp_on_reg);
3688
3689         intel_dp->pps_pipe = INVALID_PIPE;
3690 }
3691
3692 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
3693                                       enum pipe pipe)
3694 {
3695         struct intel_encoder *encoder;
3696
3697         lockdep_assert_held(&dev_priv->pps_mutex);
3698
3699         for_each_intel_dp(&dev_priv->drm, encoder) {
3700                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3701
3702                 drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
3703                          "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
3704                          pipe_name(pipe), encoder->base.base.id,
3705                          encoder->base.name);
3706
3707                 if (intel_dp->pps_pipe != pipe)
3708                         continue;
3709
3710                 drm_dbg_kms(&dev_priv->drm,
3711                             "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
3712                             pipe_name(pipe), encoder->base.base.id,
3713                             encoder->base.name);
3714
3715                 /* make sure vdd is off before we steal it */
3716                 vlv_detach_power_sequencer(intel_dp);
3717         }
3718 }
3719
3720 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
3721                                            const struct intel_crtc_state *crtc_state)
3722 {
3723         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3724         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3725         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3726
3727         lockdep_assert_held(&dev_priv->pps_mutex);
3728
3729         drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
3730
3731         if (intel_dp->pps_pipe != INVALID_PIPE &&
3732             intel_dp->pps_pipe != crtc->pipe) {
3733                 /*
3734                  * If another power sequencer was being used on this
3735                  * port previously make sure to turn off vdd there while
3736                  * we still have control of it.
3737                  */
3738                 vlv_detach_power_sequencer(intel_dp);
3739         }
3740
3741         /*
3742          * We may be stealing the power
3743          * sequencer from another port.
3744          */
3745         vlv_steal_power_sequencer(dev_priv, crtc->pipe);
3746
3747         intel_dp->active_pipe = crtc->pipe;
3748
3749         if (!intel_dp_is_edp(intel_dp))
3750                 return;
3751
3752         /* now it's all ours */
3753         intel_dp->pps_pipe = crtc->pipe;
3754
3755         drm_dbg_kms(&dev_priv->drm,
3756                     "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
3757                     pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
3758                     encoder->base.name);
3759
3760         /* init power sequencer on this pipe and port */
3761         intel_dp_init_panel_power_sequencer(intel_dp);
3762         intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
3763 }
3764
3765 static void vlv_pre_enable_dp(struct intel_encoder *encoder,
3766                               const struct intel_crtc_state *pipe_config,
3767                               const struct drm_connector_state *conn_state)
3768 {
3769         vlv_phy_pre_encoder_enable(encoder, pipe_config);
3770
3771         intel_enable_dp(encoder, pipe_config, conn_state);
3772 }
3773
3774 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
3775                                   const struct intel_crtc_state *pipe_config,
3776                                   const struct drm_connector_state *conn_state)
3777 {
3778         intel_dp_prepare(encoder, pipe_config);
3779
3780         vlv_phy_pre_pll_enable(encoder, pipe_config);
3781 }
3782
3783 static void chv_pre_enable_dp(struct intel_encoder *encoder,
3784                               const struct intel_crtc_state *pipe_config,
3785                               const struct drm_connector_state *conn_state)
3786 {
3787         chv_phy_pre_encoder_enable(encoder, pipe_config);
3788
3789         intel_enable_dp(encoder, pipe_config, conn_state);
3790
3791         /* Second common lane will stay alive on its own now */
3792         chv_phy_release_cl2_override(encoder);
3793 }
3794
3795 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
3796                                   const struct intel_crtc_state *pipe_config,
3797                                   const struct drm_connector_state *conn_state)
3798 {
3799         intel_dp_prepare(encoder, pipe_config);
3800
3801         chv_phy_pre_pll_enable(encoder, pipe_config);
3802 }
3803
3804 static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
3805                                     const struct intel_crtc_state *old_crtc_state,
3806                                     const struct drm_connector_state *old_conn_state)
3807 {
3808         chv_phy_post_pll_disable(encoder, old_crtc_state);
3809 }
3810
3811 /*
3812  * Fetch AUX CH registers 0x202 - 0x207 which contain
3813  * link status information
3814  */
3815 bool
3816 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
3817 {
3818         return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
3819                                 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3820 }
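
/*
 * Typical consumer of intel_dp_get_link_status(): fetch the six status
 * bytes once, then let the drm_dp helpers interpret them. Hypothetical
 * helper; illustration only.
 */
static bool __maybe_unused example_channel_eq_ok(struct intel_dp *intel_dp)
{
        u8 link_status[DP_LINK_STATUS_SIZE];

        if (!intel_dp_get_link_status(intel_dp, link_status))
                return false; /* AUX read failed */

        return drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}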
3821
3822 /* These are source-specific values. */
3823 u8
3824 intel_dp_voltage_max(struct intel_dp *intel_dp)
3825 {
3826         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3827         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3828         enum port port = encoder->port;
3829
3830         if (HAS_DDI(dev_priv))
3831                 return intel_ddi_dp_voltage_max(encoder);
3832         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3833                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3834         else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3835                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3836         else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3837                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3838         else
3839                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3840 }
3841
3842 u8
3843 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
3844 {
3845         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3846         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3847         enum port port = encoder->port;
3848
3849         if (HAS_DDI(dev_priv)) {
3850                 return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
3851         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3852                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3853                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3854                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
3855                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3856                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3857                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3858                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3859                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3860                 default:
3861                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3862                 }
3863         } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
3864                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3865                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3866                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3867                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3868                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3869                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3870                 default:
3871                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3872                 }
3873         } else {
3874                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3875                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3876                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3877                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3878                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3879                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3880                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3881                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3882                 default:
3883                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3884                 }
3885         }
3886 }
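
/*
 * Sketch of how link training bounds a requested drive setting with the
 * two helpers above: clamp the swing first, then clamp the pre-emphasis
 * allowed at that swing. Simplified, hypothetical helper (the real
 * logic in intel_dp_link_training.c also sets the MAX_*_REACHED flags);
 * illustration only.
 */
static u8 __maybe_unused example_clamp_drive(struct intel_dp *intel_dp,
                                             u8 vswing, u8 preemph)
{
        if (vswing > intel_dp_voltage_max(intel_dp))
                vswing = intel_dp_voltage_max(intel_dp);

        if (preemph > intel_dp_pre_emphasis_max(intel_dp, vswing))
                preemph = intel_dp_pre_emphasis_max(intel_dp, vswing);

        return vswing | preemph;
}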
3887
3888 static u32 vlv_signal_levels(struct intel_dp *intel_dp)
3889 {
3890         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3891         unsigned long demph_reg_value, preemph_reg_value,
3892                 uniqtranscale_reg_value;
3893         u8 train_set = intel_dp->train_set[0];
3894
3895         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3896         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3897                 preemph_reg_value = 0x0004000;
3898                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3899                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3900                         demph_reg_value = 0x2B405555;
3901                         uniqtranscale_reg_value = 0x552AB83A;
3902                         break;
3903                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3904                         demph_reg_value = 0x2B404040;
3905                         uniqtranscale_reg_value = 0x5548B83A;
3906                         break;
3907                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3908                         demph_reg_value = 0x2B245555;
3909                         uniqtranscale_reg_value = 0x5560B83A;
3910                         break;
3911                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3912                         demph_reg_value = 0x2B405555;
3913                         uniqtranscale_reg_value = 0x5598DA3A;
3914                         break;
3915                 default:
3916                         return 0;
3917                 }
3918                 break;
3919         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3920                 preemph_reg_value = 0x0002000;
3921                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3922                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3923                         demph_reg_value = 0x2B404040;
3924                         uniqtranscale_reg_value = 0x5552B83A;
3925                         break;
3926                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3927                         demph_reg_value = 0x2B404848;
3928                         uniqtranscale_reg_value = 0x5580B83A;
3929                         break;
3930                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3931                         demph_reg_value = 0x2B404040;
3932                         uniqtranscale_reg_value = 0x55ADDA3A;
3933                         break;
3934                 default:
3935                         return 0;
3936                 }
3937                 break;
3938         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3939                 preemph_reg_value = 0x0000000;
3940                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3941                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3942                         demph_reg_value = 0x2B305555;
3943                         uniqtranscale_reg_value = 0x5570B83A;
3944                         break;
3945                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3946                         demph_reg_value = 0x2B2B4040;
3947                         uniqtranscale_reg_value = 0x55ADDA3A;
3948                         break;
3949                 default:
3950                         return 0;
3951                 }
3952                 break;
3953         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3954                 preemph_reg_value = 0x0006000;
3955                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3956                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3957                         demph_reg_value = 0x1B405555;
3958                         uniqtranscale_reg_value = 0x55ADDA3A;
3959                         break;
3960                 default:
3961                         return 0;
3962                 }
3963                 break;
3964         default:
3965                 return 0;
3966         }
3967
3968         vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
3969                                  uniqtranscale_reg_value, 0);
3970
3971         return 0;
3972 }
3973
3974 static u32 chv_signal_levels(struct intel_dp *intel_dp)
3975 {
3976         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3977         u32 deemph_reg_value, margin_reg_value;
3978         bool uniq_trans_scale = false;
3979         u8 train_set = intel_dp->train_set[0];
3980
3981         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3982         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3983                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3984                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3985                         deemph_reg_value = 128;
3986                         margin_reg_value = 52;
3987                         break;
3988                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3989                         deemph_reg_value = 128;
3990                         margin_reg_value = 77;
3991                         break;
3992                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3993                         deemph_reg_value = 128;
3994                         margin_reg_value = 102;
3995                         break;
3996                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3997                         deemph_reg_value = 128;
3998                         margin_reg_value = 154;
3999                         uniq_trans_scale = true;
4000                         break;
4001                 default:
4002                         return 0;
4003                 }
4004                 break;
4005         case DP_TRAIN_PRE_EMPH_LEVEL_1:
4006                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
4007                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4008                         deemph_reg_value = 85;
4009                         margin_reg_value = 78;
4010                         break;
4011                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
4012                         deemph_reg_value = 85;
4013                         margin_reg_value = 116;
4014                         break;
4015                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
4016                         deemph_reg_value = 85;
4017                         margin_reg_value = 154;
4018                         break;
4019                 default:
4020                         return 0;
4021                 }
4022                 break;
4023         case DP_TRAIN_PRE_EMPH_LEVEL_2:
4024                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
4025                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4026                         deemph_reg_value = 64;
4027                         margin_reg_value = 104;
4028                         break;
4029                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
4030                         deemph_reg_value = 64;
4031                         margin_reg_value = 154;
4032                         break;
4033                 default:
4034                         return 0;
4035                 }
4036                 break;
4037         case DP_TRAIN_PRE_EMPH_LEVEL_3:
4038                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
4039                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4040                         deemph_reg_value = 43;
4041                         margin_reg_value = 154;
4042                         break;
4043                 default:
4044                         return 0;
4045                 }
4046                 break;
4047         default:
4048                 return 0;
4049         }
4050
4051         chv_set_phy_signal_level(encoder, deemph_reg_value,
4052                                  margin_reg_value, uniq_trans_scale);
4053
4054         return 0;
4055 }
4056
4057 static u32
4058 g4x_signal_levels(u8 train_set)
4059 {
4060         u32 signal_levels = 0;
4061
4062         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
4063         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4064         default:
4065                 signal_levels |= DP_VOLTAGE_0_4;
4066                 break;
4067         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
4068                 signal_levels |= DP_VOLTAGE_0_6;
4069                 break;
4070         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
4071                 signal_levels |= DP_VOLTAGE_0_8;
4072                 break;
4073         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
4074                 signal_levels |= DP_VOLTAGE_1_2;
4075                 break;
4076         }
4077         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
4078         case DP_TRAIN_PRE_EMPH_LEVEL_0:
4079         default:
4080                 signal_levels |= DP_PRE_EMPHASIS_0;
4081                 break;
4082         case DP_TRAIN_PRE_EMPH_LEVEL_1:
4083                 signal_levels |= DP_PRE_EMPHASIS_3_5;
4084                 break;
4085         case DP_TRAIN_PRE_EMPH_LEVEL_2:
4086                 signal_levels |= DP_PRE_EMPHASIS_6;
4087                 break;
4088         case DP_TRAIN_PRE_EMPH_LEVEL_3:
4089                 signal_levels |= DP_PRE_EMPHASIS_9_5;
4090                 break;
4091         }
4092         return signal_levels;
4093 }
4094
4095 /* SNB CPU eDP voltage swing and pre-emphasis control */
4096 static u32
4097 snb_cpu_edp_signal_levels(u8 train_set)
4098 {
4099         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
4100                                          DP_TRAIN_PRE_EMPHASIS_MASK);
4101         switch (signal_levels) {
4102         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4103         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4104                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
4105         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
4106                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
4107         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
4108         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
4109                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
4110         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
4111         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
4112                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
4113         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4114         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4115                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
4116         default:
4117                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
4118                               "0x%x\n", signal_levels);
4119                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
4120         }
4121 }
4122
4123 /* IVB CPU eDP voltage swing and pre-emphasis control */
4124 static u32
4125 ivb_cpu_edp_signal_levels(u8 train_set)
4126 {
4127         int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
4128                                          DP_TRAIN_PRE_EMPHASIS_MASK);
4129         switch (signal_levels) {
4130         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4131                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
4132         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
4133                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
4134         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
4135                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
4136
4137         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4138                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
4139         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
4140                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
4141
4142         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
4143                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
4144         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
4145                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
4146
4147         default:
4148                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
4149                               "0x%x\n", signal_levels);
4150                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
4151         }
4152 }
4153
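/*
 * Translate the requested voltage swing/pre-emphasis for the current
 * platform and write the result to the port register.
 */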
4154 void
4155 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
4156 {
4157         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4158         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4159         enum port port = intel_dig_port->base.port;
4160         u32 signal_levels, mask = 0;
4161         u8 train_set = intel_dp->train_set[0];
4162
4163         if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
4164                 signal_levels = bxt_signal_levels(intel_dp);
4165         } else if (HAS_DDI(dev_priv)) {
4166                 signal_levels = ddi_signal_levels(intel_dp);
4167                 mask = DDI_BUF_EMP_MASK;
4168         } else if (IS_CHERRYVIEW(dev_priv)) {
4169                 signal_levels = chv_signal_levels(intel_dp);
4170         } else if (IS_VALLEYVIEW(dev_priv)) {
4171                 signal_levels = vlv_signal_levels(intel_dp);
4172         } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
4173                 signal_levels = ivb_cpu_edp_signal_levels(train_set);
4174                 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
4175         } else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
4176                 signal_levels = snb_cpu_edp_signal_levels(train_set);
4177                 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
4178         } else {
4179                 signal_levels = g4x_signal_levels(train_set);
4180                 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
4181         }
4182
4183         if (mask)
4184                 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
4185                             signal_levels);
4186
4187         drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
4188                     train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
4189                     train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
4190         drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
4191                     (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
4192                     DP_TRAIN_PRE_EMPHASIS_SHIFT,
4193                     train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
4194                     " (max)" : "");
4195
4196         intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
4197
4198         intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
4199         intel_de_posting_read(dev_priv, intel_dp->output_reg);
4200 }
4201
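/* Program the requested link training pattern into the port register. */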
4202 void
4203 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
4204                                        u8 dp_train_pat)
4205 {
4206         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4207         struct drm_i915_private *dev_priv =
4208                 to_i915(intel_dig_port->base.base.dev);
4209
4210         _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
4211
4212         intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
4213         intel_de_posting_read(dev_priv, intel_dp->output_reg);
4214 }
4215
4216 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
4217 {
4218         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4219         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4220         enum port port = intel_dig_port->base.port;
4221         u32 val;
4222
4223         if (!HAS_DDI(dev_priv))
4224                 return;
4225
4226         val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
4227         val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
4228         val |= DP_TP_CTL_LINK_TRAIN_IDLE;
4229         intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
4230
4231         /*
4232          * Until TGL, PORT_A can only drive eDP in SST mode. There the only
4233          * reason we need to set idle transmission mode is to work around a HW
4234          * issue where we enable the pipe while not in idle link-training mode.
4235          * In this case we need to wait for a minimum number of idle patterns
4236          * to be sent.
4237          */
4238         if (port == PORT_A && INTEL_GEN(dev_priv) < 12)
4239                 return;
4240
4241         if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
4242                                   DP_TP_STATUS_IDLE_DONE, 1))
4243                 drm_err(&dev_priv->drm,
4244                         "Timed out waiting for DP idle patterns\n");
4245 }
4246
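/*
 * Put the port into the idle pattern and disable it, applying the IBX
 * transcoder A workaround where needed (see below).
 */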
4247 static void
4248 intel_dp_link_down(struct intel_encoder *encoder,
4249                    const struct intel_crtc_state *old_crtc_state)
4250 {
4251         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4252         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4253         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
4254         enum port port = encoder->port;
4255         u32 DP = intel_dp->DP;
4256
4257         if (drm_WARN_ON(&dev_priv->drm,
4258                         (intel_de_read(dev_priv, intel_dp->output_reg) &
4259                          DP_PORT_EN) == 0))
4260                 return;
4261
4262         drm_dbg_kms(&dev_priv->drm, "\n");
4263
4264         if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
4265             (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
4266                 DP &= ~DP_LINK_TRAIN_MASK_CPT;
4267                 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
4268         } else {
4269                 DP &= ~DP_LINK_TRAIN_MASK;
4270                 DP |= DP_LINK_TRAIN_PAT_IDLE;
4271         }
4272         intel_de_write(dev_priv, intel_dp->output_reg, DP);
4273         intel_de_posting_read(dev_priv, intel_dp->output_reg);
4274
4275         DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
4276         intel_de_write(dev_priv, intel_dp->output_reg, DP);
4277         intel_de_posting_read(dev_priv, intel_dp->output_reg);
4278
4279         /*
4280          * HW workaround for IBX, we need to move the port
4281          * to transcoder A after disabling it to allow the
4282          * matching HDMI port to be enabled on transcoder A.
4283          */
4284         if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
4285                 /*
4286                  * We get CPU/PCH FIFO underruns on the other pipe when
4287                  * doing the workaround. Sweep them under the rug.
4288                  */
4289                 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4290                 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4291
4292                 /* always enable with pattern 1 (as per spec) */
4293                 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
4294                 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
4295                         DP_LINK_TRAIN_PAT_1;
4296                 intel_de_write(dev_priv, intel_dp->output_reg, DP);
4297                 intel_de_posting_read(dev_priv, intel_dp->output_reg);
4298
4299                 DP &= ~DP_PORT_EN;
4300                 intel_de_write(dev_priv, intel_dp->output_reg, DP);
4301                 intel_de_posting_read(dev_priv, intel_dp->output_reg);
4302
4303                 intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
4304                 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4305                 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4306         }
4307
4308         msleep(intel_dp->panel_power_down_delay);
4309
4310         intel_dp->DP = DP;
4311
4312         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
4313                 intel_wakeref_t wakeref;
4314
4315                 with_pps_lock(intel_dp, wakeref)
4316                         intel_dp->active_pipe = INVALID_PIPE;
4317         }
4318 }
4319
4320 static void
4321 intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
4322 {
4323         u8 dpcd_ext[6];
4324
4325         /*
4326          * Prior to DP1.3 the bit represented by
4327          * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
4328          * If it is set, DP_DPCD_REV at 0000h could be at a value less than
4329          * the true capability of the panel. The only way to check is to
4330          * compare the values at 0000h and 2200h.
4331          */
4332         if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
4333               DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
4334                 return;
4335
4336         if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
4337                              &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
4338                 DRM_ERROR("DPCD read failed at extended capabilities\n");
4339                 return;
4340         }
4341
4342         if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
4343                 DRM_DEBUG_KMS("Extended DPCD rev less than base DPCD rev\n");
4344                 return;
4345         }
4346
4347         if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
4348                 return;
4349
4350         DRM_DEBUG_KMS("Base DPCD: %*ph\n",
4351                       (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
4352
4353         memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
4354 }
4355
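/*
 * Read the base receiver capabilities into intel_dp->dpcd, preferring the
 * extended capability field at 2200h when the sink advertises it.
 */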
4356 bool
4357 intel_dp_read_dpcd(struct intel_dp *intel_dp)
4358 {
4359         if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
4360                              sizeof(intel_dp->dpcd)) < 0)
4361                 return false; /* aux transfer failed */
4362
4363         intel_dp_extended_receiver_capabilities(intel_dp);
4364
4365         DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
4366
4367         return intel_dp->dpcd[DP_DPCD_REV] != 0;
4368 }
4369
4370 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
4371 {
4372         u8 dprx = 0;
4373
4374         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
4375                               &dprx) != 1)
4376                 return false;
4377         return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
4378 }
4379
4380 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
4381 {
4382         /*
4383          * Clear the cached register set to avoid using stale values
4384          * for the sinks that do not support DSC.
4385          */
4386         memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
4387
4388         /* Clear fec_capable to avoid using stale values */
4389         intel_dp->fec_capable = 0;
4390
4391         /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
4392         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
4393             intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4394                 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
4395                                      intel_dp->dsc_dpcd,
4396                                      sizeof(intel_dp->dsc_dpcd)) < 0)
4397                         DRM_ERROR("Failed to read DPCD register 0x%x\n",
4398                                   DP_DSC_SUPPORT);
4399
4400                 DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
4401                               (int)sizeof(intel_dp->dsc_dpcd),
4402                               intel_dp->dsc_dpcd);
4403
4404                 /* FEC is supported only on DP 1.4 */
4405                 if (!intel_dp_is_edp(intel_dp) &&
4406                     drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
4407                                       &intel_dp->fec_capable) < 0)
4408                         DRM_ERROR("Failed to read FEC DPCD register\n");
4409
4410                 DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
4411         }
4412 }
4413
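/*
 * One-time eDP DPCD initialization: receiver caps, eDP display control
 * registers, PSR capabilities and the eDP 1.4+ sink rate table.
 */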
4414 static bool
4415 intel_edp_init_dpcd(struct intel_dp *intel_dp)
4416 {
4417         struct drm_i915_private *dev_priv =
4418                 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
4419
4420         /* this function is meant to be called only once */
4421         drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);
4422
4423         if (!intel_dp_read_dpcd(intel_dp))
4424                 return false;
4425
4426         drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
4427                          drm_dp_is_branch(intel_dp->dpcd));
4428
4429         /*
4430          * Read the eDP display control registers.
4431          *
4432          * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
4433          * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
4434          * set, but require eDP 1.4+ detection (e.g. for supported link rates
4435          * method). The display control registers should read zero if they're
4436          * not supported anyway.
4437          */
4438         if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
4439                              intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
4440                              sizeof(intel_dp->edp_dpcd))
4441                 drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
4442                             (int)sizeof(intel_dp->edp_dpcd),
4443                             intel_dp->edp_dpcd);
4444
4445         /*
4446          * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
4447          * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
4448          */
4449         intel_psr_init_dpcd(intel_dp);
4450
4451         /* Read the eDP 1.4+ supported link rates. */
4452         if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4453                 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
4454                 int i;
4455
4456                 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
4457                                 sink_rates, sizeof(sink_rates));
4458
4459                 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4460                         int val = le16_to_cpu(sink_rates[i]);
4461
4462                         if (val == 0)
4463                                 break;
4464
4465                         /* Value read multiplied by 200kHz gives the per-lane
4466                          * link rate in kHz. The source rates are, however,
4467                          * stored in terms of LS_Clk kHz. The full conversion
4468                          * back to symbols is
4469                          * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
4470                          */
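                        /* e.g. val = 8100: 8100 * 200 kHz = 1.62 Gbps -> 162000 kHz LS_Clk */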
4471                         intel_dp->sink_rates[i] = (val * 200) / 10;
4472                 }
4473                 intel_dp->num_sink_rates = i;
4474         }
4475
4476         /*
4477          * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
4478          * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
4479          */
4480         if (intel_dp->num_sink_rates)
4481                 intel_dp->use_rate_select = true;
4482         else
4483                 intel_dp_set_sink_rates(intel_dp);
4484
4485         intel_dp_set_common_rates(intel_dp);
4486
4487         /* Read the eDP DSC DPCD registers */
4488         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4489                 intel_dp_get_dsc_sink_cap(intel_dp);
4490
4491         return true;
4492 }
4493
4495 static bool
4496 intel_dp_get_dpcd(struct intel_dp *intel_dp)
4497 {
4498         if (!intel_dp_read_dpcd(intel_dp))
4499                 return false;
4500
4501         /*
4502          * Don't clobber cached eDP rates. Also skip re-reading
4503          * the OUI/ID since we know it won't change.
4504          */
4505         if (!intel_dp_is_edp(intel_dp)) {
4506                 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
4507                                  drm_dp_is_branch(intel_dp->dpcd));
4508
4509                 intel_dp_set_sink_rates(intel_dp);
4510                 intel_dp_set_common_rates(intel_dp);
4511         }
4512
4513         /*
4514          * Some eDP panels do not set a valid value for sink count, which is
4515          * why we don't bother reading it here or in intel_edp_init_dpcd().
4516          */
4517         if (!intel_dp_is_edp(intel_dp) &&
4518             !drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_SINK_COUNT)) {
4519                 u8 count;
4520                 ssize_t r;
4521
4522                 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
4523                 if (r < 1)
4524                         return false;
4525
4526                 /*
4527                  * Sink count can change between short pulse HPD interrupts,
4528                  * hence a member variable in intel_dp will track any changes
4529                  * between short pulse interrupts.
4530                  */
4531                 intel_dp->sink_count = DP_GET_SINK_COUNT(count);
4532
4533                 /*
4534                  * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
4535                  * a dongle is present but no display. Unless we need to know
4536                  * whether a dongle is present or not, we don't need to update
4537                  * downstream port information. So, an early return here saves
4538                  * time from performing other operations which are not required.
4539                  */
4540                 if (!intel_dp->sink_count)
4541                         return false;
4542         }
4543
4544         if (!drm_dp_is_branch(intel_dp->dpcd))
4545                 return true; /* native DP sink */
4546
4547         if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4548                 return true; /* no per-port downstream info */
4549
4550         if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4551                              intel_dp->downstream_ports,
4552                              DP_MAX_DOWNSTREAM_PORTS) < 0)
4553                 return false; /* downstream port status fetch failed */
4554
4555         return true;
4556 }
4557
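/* Does the sink support MST? Requires DPCD r1.2+ and the DP_MST_CAP bit. */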
4558 static bool
4559 intel_dp_sink_can_mst(struct intel_dp *intel_dp)
4560 {
4561         u8 mstm_cap;
4562
4563         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4564                 return false;
4565
4566         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
4567                 return false;
4568
4569         return mstm_cap & DP_MST_CAP;
4570 }
4571
4572 static bool
4573 intel_dp_can_mst(struct intel_dp *intel_dp)
4574 {
4575         return i915_modparams.enable_dp_mst &&
4576                 intel_dp->can_mst &&
4577                 intel_dp_sink_can_mst(intel_dp);
4578 }
4579
4580 static void
4581 intel_dp_configure_mst(struct intel_dp *intel_dp)
4582 {
4583         struct intel_encoder *encoder =
4584                 &dp_to_dig_port(intel_dp)->base;
4585         bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
4586
4587         DRM_DEBUG_KMS("[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
4588                       encoder->base.base.id, encoder->base.name,
4589                       yesno(intel_dp->can_mst), yesno(sink_can_mst),
4590                       yesno(i915_modparams.enable_dp_mst));
4591
4592         if (!intel_dp->can_mst)
4593                 return;
4594
4595         intel_dp->is_mst = sink_can_mst &&
4596                 i915_modparams.enable_dp_mst;
4597
4598         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4599                                         intel_dp->is_mst);
4600 }
4601
4602 static bool
4603 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4604 {
4605         return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
4606                                 sink_irq_vector, DP_DPRX_ESI_LEN) ==
4607                 DP_DPRX_ESI_LEN;
4608 }
4609
4610 bool
4611 intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
4612                        const struct drm_connector_state *conn_state)
4613 {
4614         /*
4615          * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
4616          * of Color Encoding Format and Content Color Gamut], in order to
4617          * send YCBCR 420 or HDR BT.2020 signals we should use a DP VSC SDP.
4618          */
4619         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
4620                 return true;
4621
4622         switch (conn_state->colorspace) {
4623         case DRM_MODE_COLORIMETRY_SYCC_601:
4624         case DRM_MODE_COLORIMETRY_OPYCC_601:
4625         case DRM_MODE_COLORIMETRY_BT2020_YCC:
4626         case DRM_MODE_COLORIMETRY_BT2020_RGB:
4627         case DRM_MODE_COLORIMETRY_BT2020_CYCC:
4628                 return true;
4629         default:
4630                 break;
4631         }
4632
4633         return false;
4634 }
4635
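/*
 * Build a DP 1.4a VSC SDP describing the pixel encoding, colorimetry and
 * bit depth, and hand it to the encoder's write_infoframe() hook.
 */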
4636 static void
4637 intel_dp_setup_vsc_sdp(struct intel_dp *intel_dp,
4638                        const struct intel_crtc_state *crtc_state,
4639                        const struct drm_connector_state *conn_state)
4640 {
4641         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4642         struct dp_sdp vsc_sdp = {};
4643
4644         /* Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 */
4645         vsc_sdp.sdp_header.HB0 = 0;
4646         vsc_sdp.sdp_header.HB1 = 0x7;
4647
4648         /*
4649          * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
4650          * Colorimetry Format indication.
4651          */
4652         vsc_sdp.sdp_header.HB2 = 0x5;
4653
4654         /*
4655          * Number of valid data bytes (13h = 19) for the VSC SDP
4656          * revision above (HB2 = 05h).
4657          */
4658         vsc_sdp.sdp_header.HB3 = 0x13;
4659
4660         /* DP 1.4a spec, Table 2-120 */
4661         switch (crtc_state->output_format) {
4662         case INTEL_OUTPUT_FORMAT_YCBCR444:
4663                 vsc_sdp.db[16] = 0x1 << 4; /* YCbCr 444 : DB16[7:4] = 1h */
4664                 break;
4665         case INTEL_OUTPUT_FORMAT_YCBCR420:
4666                 vsc_sdp.db[16] = 0x3 << 4; /* YCbCr 420 : DB16[7:4] = 3h */
4667                 break;
4668         case INTEL_OUTPUT_FORMAT_RGB:
4669         default:
4670                 /* RGB: DB16[7:4] = 0h */
4671                 break;
4672         }
4673
4674         switch (conn_state->colorspace) {
4675         case DRM_MODE_COLORIMETRY_BT709_YCC:
4676                 vsc_sdp.db[16] |= 0x1;
4677                 break;
4678         case DRM_MODE_COLORIMETRY_XVYCC_601:
4679                 vsc_sdp.db[16] |= 0x2;
4680                 break;
4681         case DRM_MODE_COLORIMETRY_XVYCC_709:
4682                 vsc_sdp.db[16] |= 0x3;
4683                 break;
4684         case DRM_MODE_COLORIMETRY_SYCC_601:
4685                 vsc_sdp.db[16] |= 0x4;
4686                 break;
4687         case DRM_MODE_COLORIMETRY_OPYCC_601:
4688                 vsc_sdp.db[16] |= 0x5;
4689                 break;
4690         case DRM_MODE_COLORIMETRY_BT2020_CYCC:
4691         case DRM_MODE_COLORIMETRY_BT2020_RGB:
4692                 vsc_sdp.db[16] |= 0x6;
4693                 break;
4694         case DRM_MODE_COLORIMETRY_BT2020_YCC:
4695                 vsc_sdp.db[16] |= 0x7;
4696                 break;
4697         case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
4698         case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
4699                 vsc_sdp.db[16] |= 0x4; /* DCI-P3 (SMPTE RP 431-2) */
4700                 break;
4701         default:
4702                 /* sRGB (IEC 61966-2-1) / ITU-R BT.601: DB16[0:3] = 0h */
4703
4704                 /* RGB->YCBCR color conversion uses the BT.709 color space. */
4705                 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
4706                         vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */
4707                 break;
4708         }
4709
4710         /*
4711          * For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only,
4712          * the following Component Bit Depth values are defined:
4713          * 001b = 8bpc.
4714          * 010b = 10bpc.
4715          * 011b = 12bpc.
4716          * 100b = 16bpc.
4717          */
4718         switch (crtc_state->pipe_bpp) {
4719         case 24: /* 8bpc */
4720                 vsc_sdp.db[17] = 0x1;
4721                 break;
4722         case 30: /* 10bpc */
4723                 vsc_sdp.db[17] = 0x2;
4724                 break;
4725         case 36: /* 12bpc */
4726                 vsc_sdp.db[17] = 0x3;
4727                 break;
4728         case 48: /* 16bpc */
4729                 vsc_sdp.db[17] = 0x4;
4730                 break;
4731         default:
4732                 MISSING_CASE(crtc_state->pipe_bpp);
4733                 break;
4734         }
4735
4736         /*
4737          * Dynamic Range (Bit 7)
4738          * 0 = VESA range, 1 = CTA range.
4739          * all YCbCr are always limited range
4740          */
4741         vsc_sdp.db[17] |= 0x80;
4742
4743         /*
4744          * Content Type (Bits 2:0)
4745          * 000b = Not defined.
4746          * 001b = Graphics.
4747          * 010b = Photo.
4748          * 011b = Video.
4749          * 100b = Game
4750          * All other values are RESERVED.
4751          * Note: See CTA-861-G for the definition and expected
4752          * processing by a stream sink for the above content types.
4753          */
4754         vsc_sdp.db[18] = 0;
4755
4756         intel_dig_port->write_infoframe(&intel_dig_port->base,
4757                         crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp));
4758 }
4759
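/*
 * Pack the HDR static metadata from the connector state into an HDMI DRM
 * infoframe and transmit it as a DP SDP via the GMP packet slot.
 */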
4760 static void
4761 intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
4762                                           const struct intel_crtc_state *crtc_state,
4763                                           const struct drm_connector_state *conn_state)
4764 {
4765         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4766         struct dp_sdp infoframe_sdp = {};
4767         struct hdmi_drm_infoframe drm_infoframe = {};
4768         const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
4769         unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
4770         ssize_t len;
4771         int ret;
4772
4773         ret = drm_hdmi_infoframe_set_hdr_metadata(&drm_infoframe, conn_state);
4774         if (ret) {
4775                 DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n");
4776                 return;
4777         }
4778
4779         len = hdmi_drm_infoframe_pack_only(&drm_infoframe, buf, sizeof(buf));
4780         if (len < 0) {
4781                 DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
4782                 return;
4783         }
4784
4785         if (len != infoframe_size) {
4786                 DRM_DEBUG_KMS("wrong static hdr metadata size\n");
4787                 return;
4788         }
4789
4790         /*
4791          * Set up the infoframe sdp packet for HDR static metadata.
4792          * Prepare VSC Header for SU as per DP 1.4a spec,
4793          * Table 2-100 and Table 2-101
4794          */
4795
4796         /* Packet ID, 00h for non-Audio INFOFRAME */
4797         infoframe_sdp.sdp_header.HB0 = 0;
4798         /*
4799          * Packet Type 80h + Non-audio INFOFRAME Type value
4800          * HDMI_INFOFRAME_TYPE_DRM: 0x87,
4801          */
4802         infoframe_sdp.sdp_header.HB1 = drm_infoframe.type;
4803         /*
4804          * Least Significant Eight Bits of (Data Byte Count - 1)
4805          * infoframe_size - 1,
4806          */
4807         infoframe_sdp.sdp_header.HB2 = 0x1D;
4808         /* INFOFRAME SDP Version Number */
4809         infoframe_sdp.sdp_header.HB3 = (0x13 << 2);
4810         /* CTA Header Byte 2 (INFOFRAME Version Number) */
4811         infoframe_sdp.db[0] = drm_infoframe.version;
4812         /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
4813         infoframe_sdp.db[1] = drm_infoframe.length;
4814         /*
4815          * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
4816          * HDMI_INFOFRAME_HEADER_SIZE
4817          */
4818         BUILD_BUG_ON(sizeof(infoframe_sdp.db) < HDMI_DRM_INFOFRAME_SIZE + 2);
4819         memcpy(&infoframe_sdp.db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
4820                HDMI_DRM_INFOFRAME_SIZE);
4821
4822         /*
4823          * Size of DP infoframe sdp packet for HDR static metadata consists of
4824          * - DP SDP Header(struct dp_sdp_header): 4 bytes
4825          * - Two Data Blocks: 2 bytes
4826          *    CTA Header Byte2 (INFOFRAME Version Number)
4827          *    CTA Header Byte3 (Length of INFOFRAME)
4828          * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
4829          *
4830          * Prior to GEN11 the GMP register size is identical to the DP HDR static
4831          * metadata infoframe size. GEN11+ has a larger register; write_infoframe
4832          * will pad the rest of the size.
4833          */
4834         intel_dig_port->write_infoframe(&intel_dig_port->base, crtc_state,
4835                                         HDMI_PACKET_TYPE_GAMUT_METADATA,
4836                                         &infoframe_sdp,
4837                                         sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE);
4838 }
4839
4840 void intel_dp_vsc_enable(struct intel_dp *intel_dp,
4841                          const struct intel_crtc_state *crtc_state,
4842                          const struct drm_connector_state *conn_state)
4843 {
4844         if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
4845                 return;
4846
4847         intel_dp_setup_vsc_sdp(intel_dp, crtc_state, conn_state);
4848 }
4849
4850 void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp,
4851                                   const struct intel_crtc_state *crtc_state,
4852                                   const struct drm_connector_state *conn_state)
4853 {
4854         if (!conn_state->hdr_output_metadata)
4855                 return;
4856
4857         intel_dp_setup_hdr_metadata_infoframe_sdp(intel_dp,
4858                                                   crtc_state,
4859                                                   conn_state);
4860 }
4861
4862 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4863 {
4864         int status = 0;
4865         int test_link_rate;
4866         u8 test_lane_count, test_link_bw;
4867         /* (DP CTS 1.2)
4868          * 4.3.1.11
4869          */
4870         /* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
4871         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
4872                                    &test_lane_count);
4873
4874         if (status <= 0) {
4875                 DRM_DEBUG_KMS("Lane count read failed\n");
4876                 return DP_TEST_NAK;
4877         }
4878         test_lane_count &= DP_MAX_LANE_COUNT_MASK;
4879
4880         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
4881                                    &test_link_bw);
4882         if (status <= 0) {
4883                 DRM_DEBUG_KMS("Link Rate read failed\n");
4884                 return DP_TEST_NAK;
4885         }
4886         test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
4887
4888         /* Validate the requested link rate and lane count */
4889         if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
4890                                         test_lane_count))
4891                 return DP_TEST_NAK;
4892
4893         intel_dp->compliance.test_lane_count = test_lane_count;
4894         intel_dp->compliance.test_link_rate = test_link_rate;
4895
4896         return DP_TEST_ACK;
4897 }
4898
4899 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4900 {
4901         u8 test_pattern;
4902         u8 test_misc;
4903         __be16 h_width, v_height;
4904         int status = 0;
4905
4906         /* Read the TEST_PATTERN (DP CTS 3.1.5) */
4907         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
4908                                    &test_pattern);
4909         if (status <= 0) {
4910                 DRM_DEBUG_KMS("Test pattern read failed\n");
4911                 return DP_TEST_NAK;
4912         }
4913         if (test_pattern != DP_COLOR_RAMP)
4914                 return DP_TEST_NAK;
4915
4916         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
4917                                   &h_width, 2);
4918         if (status <= 0) {
4919                 DRM_DEBUG_KMS("H Width read failed\n");
4920                 return DP_TEST_NAK;
4921         }
4922
4923         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
4924                                   &v_height, 2);
4925         if (status <= 0) {
4926                 DRM_DEBUG_KMS("V Height read failed\n");
4927                 return DP_TEST_NAK;
4928         }
4929
4930         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
4931                                    &test_misc);
4932         if (status <= 0) {
4933                 DRM_DEBUG_KMS("TEST MISC read failed\n");
4934                 return DP_TEST_NAK;
4935         }
4936         if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
4937                 return DP_TEST_NAK;
4938         if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
4939                 return DP_TEST_NAK;
4940         switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
4941         case DP_TEST_BIT_DEPTH_6:
4942                 intel_dp->compliance.test_data.bpc = 6;
4943                 break;
4944         case DP_TEST_BIT_DEPTH_8:
4945                 intel_dp->compliance.test_data.bpc = 8;
4946                 break;
4947         default:
4948                 return DP_TEST_NAK;
4949         }
4950
4951         intel_dp->compliance.test_data.video_pattern = test_pattern;
4952         intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
4953         intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
4954         /* Set test active flag here so userspace doesn't interrupt things */
4955         intel_dp->compliance.test_active = true;
4956
4957         return DP_TEST_ACK;
4958 }
4959
4960 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
4961 {
4962         u8 test_result = DP_TEST_ACK;
4963         struct intel_connector *intel_connector = intel_dp->attached_connector;
4964         struct drm_connector *connector = &intel_connector->base;
4965
4966         if (intel_connector->detect_edid == NULL ||
4967             connector->edid_corrupt ||
4968             intel_dp->aux.i2c_defer_count > 6) {
4969                 /* Check EDID read for NACKs, DEFERs and corruption
4970                  * (DP CTS 1.2 Core r1.1)
4971                  *    4.2.2.4 : Failed EDID read, I2C_NAK
4972                  *    4.2.2.5 : Failed EDID read, I2C_DEFER
4973                  *    4.2.2.6 : EDID corruption detected
4974                  * Use failsafe mode for all cases
4975                  */
4976                 if (intel_dp->aux.i2c_nack_count > 0 ||
4977                         intel_dp->aux.i2c_defer_count > 0)
4978                         DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4979                                       intel_dp->aux.i2c_nack_count,
4980                                       intel_dp->aux.i2c_defer_count);
4981                 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
4982         } else {
4983                 struct edid *block = intel_connector->detect_edid;
4984
4985                 /* We have to write the checksum
4986                  * of the last block read
4987                  */
4988                 block += intel_connector->detect_edid->extensions;
4989
4990                 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
4991                                        block->checksum) <= 0)
4992                         DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4993
4994                 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4995                 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
4996         }
4997
4998         /* Set test active flag here so userspace doesn't interrupt things */
4999         intel_dp->compliance.test_active = true;
5000
5001         return test_result;
5002 }
5003
5004 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
5005 {
5006         u8 test_result = DP_TEST_NAK;
5007         return test_result;
5008 }
5009
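/*
 * Dispatch an automated compliance test request from the sink and
 * ack/nak it via DP_TEST_RESPONSE.
 */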
5010 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
5011 {
5012         u8 response = DP_TEST_NAK;
5013         u8 request = 0;
5014         int status;
5015
5016         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
5017         if (status <= 0) {
5018                 DRM_DEBUG_KMS("Could not read test request from sink\n");
5019                 goto update_status;
5020         }
5021
5022         switch (request) {
5023         case DP_TEST_LINK_TRAINING:
5024                 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
5025                 response = intel_dp_autotest_link_training(intel_dp);
5026                 break;
5027         case DP_TEST_LINK_VIDEO_PATTERN:
5028                 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
5029                 response = intel_dp_autotest_video_pattern(intel_dp);
5030                 break;
5031         case DP_TEST_LINK_EDID_READ:
5032                 DRM_DEBUG_KMS("EDID test requested\n");
5033                 response = intel_dp_autotest_edid(intel_dp);
5034                 break;
5035         case DP_TEST_LINK_PHY_TEST_PATTERN:
5036                 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
5037                 response = intel_dp_autotest_phy_pattern(intel_dp);
5038                 break;
5039         default:
5040                 DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
5041                 break;
5042         }
5043
5044         if (response & DP_TEST_ACK)
5045                 intel_dp->compliance.test_type = request;
5046
5047 update_status:
5048         status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
5049         if (status <= 0)
5050                 DRM_DEBUG_KMS("Could not write test response to sink\n");
5051 }
5052
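/*
 * Service MST sink event notifications (ESI): retrain the link if channel
 * EQ is no longer ok, let the MST manager handle the events, and ack them.
 */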
5053 static int
5054 intel_dp_check_mst_status(struct intel_dp *intel_dp)
5055 {
5056         bool bret;
5057
5058         if (intel_dp->is_mst) {
5059                 u8 esi[DP_DPRX_ESI_LEN] = { 0 };
5060                 int ret = 0;
5061                 int retry;
5062                 bool handled;
5063
5064                 WARN_ON_ONCE(intel_dp->active_mst_links < 0);
5065                 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
5066 go_again:
5067                 if (bret) {
5069                         /* check link status - esi[10] = 0x200c */
5070                         if (intel_dp->active_mst_links > 0 &&
5071                             !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
5072                                 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
5073                                 intel_dp_start_link_train(intel_dp);
5074                                 intel_dp_stop_link_train(intel_dp);
5075                         }
5076
5077                         DRM_DEBUG_KMS("got esi %3ph\n", esi);
5078                         ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
5079
5080                         if (handled) {
5081                                 for (retry = 0; retry < 3; retry++) {
5082                                         int wret;
5083                                         wret = drm_dp_dpcd_write(&intel_dp->aux,
5084                                                                  DP_SINK_COUNT_ESI+1,
5085                                                                  &esi[1], 3);
5086                                         if (wret == 3) {
5087                                                 break;
5088                                         }
5089                                 }
5090
5091                                 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
5092                                 if (bret) {
5093                                         DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
5094                                         goto go_again;
5095                                 }
5096                         } else
5097                                 ret = 0;
5098
5099                         return ret;
5100                 } else {
5101                         DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
5102                         intel_dp->is_mst = false;
5103                         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5104                                                         intel_dp->is_mst);
5105                 }
5106         }
5107         return -EINVAL;
5108 }
5109
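/* Does a previously trained link need retraining (channel EQ or CR not ok)? */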
5110 static bool
5111 intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
5112 {
5113         u8 link_status[DP_LINK_STATUS_SIZE];
5114
5115         if (!intel_dp->link_trained)
5116                 return false;
5117
5118          * While the PSR source HW is enabled it controls the main link,
5119          * sending frames and enabling/disabling it, so trying to retrain
5120          * would fail: the link may not be on, or training patterns could
5121          * get mixed with frame data, causing the retrain to fail.
5122          * Also, when exiting PSR the HW will retrain the link anyway,
5123          * fixing any link status error.
5124          * any link status error.
5125          */
5126         if (intel_psr_enabled(intel_dp))
5127                 return false;
5128
5129         if (!intel_dp_get_link_status(intel_dp, link_status))
5130                 return false;
5131
5132         /*
5133          * Validate the cached values of intel_dp->link_rate and
5134          * intel_dp->lane_count before attempting to retrain.
5135          */
5136         if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
5137                                         intel_dp->lane_count))
5138                 return false;
5139
5140         /* Retrain if Channel EQ or CR not ok */
5141         return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
5142 }
5143
5144 int intel_dp_retrain_link(struct intel_encoder *encoder,
5145                           struct drm_modeset_acquire_ctx *ctx)
5146 {
5147         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5148         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
5149         struct intel_connector *connector = intel_dp->attached_connector;
5150         struct drm_connector_state *conn_state;
5151         struct intel_crtc_state *crtc_state;
5152         struct intel_crtc *crtc;
5153         int ret;
5154
5155         /* FIXME handle the MST connectors as well */
5156
5157         if (!connector || connector->base.status != connector_status_connected)
5158                 return 0;
5159
5160         ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
5161                                ctx);
5162         if (ret)
5163                 return ret;
5164
5165         conn_state = connector->base.state;
5166
5167         crtc = to_intel_crtc(conn_state->crtc);
5168         if (!crtc)
5169                 return 0;
5170
5171         ret = drm_modeset_lock(&crtc->base.mutex, ctx);
5172         if (ret)
5173                 return ret;
5174
5175         crtc_state = to_intel_crtc_state(crtc->base.state);
5176
5177         drm_WARN_ON(&dev_priv->drm, !intel_crtc_has_dp_encoder(crtc_state));
5178
5179         if (!crtc_state->hw.active)
5180                 return 0;
5181
5182         if (conn_state->commit &&
5183             !try_wait_for_completion(&conn_state->commit->hw_done))
5184                 return 0;
5185
5186         if (!intel_dp_needs_link_retrain(intel_dp))
5187                 return 0;
5188
5189         /* Suppress underruns caused by re-training */
5190         intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
5191         if (crtc_state->has_pch_encoder)
5192                 intel_set_pch_fifo_underrun_reporting(dev_priv,
5193                                                       intel_crtc_pch_transcoder(crtc), false);
5194
5195         intel_dp_start_link_train(intel_dp);
5196         intel_dp_stop_link_train(intel_dp);
5197
5198         /* Keep underrun reporting disabled until things are stable */
5199         intel_wait_for_vblank(dev_priv, crtc->pipe);
5200
5201         intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
5202         if (crtc_state->has_pch_encoder)
5203                 intel_set_pch_fifo_underrun_reporting(dev_priv,
5204                                                       intel_crtc_pch_transcoder(crtc), true);
5205
5206         return 0;
5207 }
5208
5209 /*
5210  * If the display is now connected, check the link status;
5211  * there have been known issues of link loss triggering
5212  * a long pulse.
5213  *
5214  * Some sinks (e.g. ASUS PB287Q) seem to perform some
5215  * weird HPD ping pong during modesets. So we can apparently
5216  * end up with HPD going low during a modeset, and then
5217  * going back up soon after. And once that happens we must
5218  * retrain the link to get a picture. That's in case no
5219  * userspace component reacted to the intermittent HPD dip.
5220  */
5221 static enum intel_hotplug_state
5222 intel_dp_hotplug(struct intel_encoder *encoder,
5223                  struct intel_connector *connector,
5224                  bool irq_received)
5225 {
5226         struct drm_modeset_acquire_ctx ctx;
5227         enum intel_hotplug_state state;
5228         int ret;
5229
5230         state = intel_encoder_hotplug(encoder, connector, irq_received);
5231
5232         drm_modeset_acquire_init(&ctx, 0);
5233
5234         for (;;) {
5235                 ret = intel_dp_retrain_link(encoder, &ctx);
5236
5237                 if (ret == -EDEADLK) {
5238                         drm_modeset_backoff(&ctx);
5239                         continue;
5240                 }
5241
5242                 break;
5243         }
5244
5245         drm_modeset_drop_locks(&ctx);
5246         drm_modeset_acquire_fini(&ctx);
5247         drm_WARN(encoder->base.dev, ret,
5248                  "Acquiring modeset locks failed with %i\n", ret);
5249
5250         /*
5251          * Keeping it consistent with intel_ddi_hotplug() and
5252          * intel_hdmi_hotplug().
5253          */
5254         if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
5255                 state = INTEL_HOTPLUG_RETRY;
5256
5257         return state;
5258 }
5259
5260 static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
5261 {
5262         u8 val;
5263
5264         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
5265                 return;
5266
5267         if (drm_dp_dpcd_readb(&intel_dp->aux,
5268                               DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
5269                 return;
5270
5271         drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
5272
5273         if (val & DP_AUTOMATED_TEST_REQUEST)
5274                 intel_dp_handle_test_request(intel_dp);
5275
5276         if (val & DP_CP_IRQ)
5277                 intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
5278
5279         if (val & DP_SINK_SPECIFIC_IRQ)
5280                 DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
5281 }
5282
5283 /*
5284  * According to DP spec
5285  * 5.1.2:
5286  *  1. Read DPCD
5287  *  2. Configure link according to Receiver Capabilities
5288  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
5289  *  4. Check link status on receipt of hot-plug interrupt
5290  *
5291  * intel_dp_short_pulse -  handles short pulse interrupts
5292  * when full detection is not required.
5293  * Returns %true if short pulse is handled and full detection
5294  * is NOT required and %false otherwise.
5295  */
5296 static bool
5297 intel_dp_short_pulse(struct intel_dp *intel_dp)
5298 {
5299         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
5300         u8 old_sink_count = intel_dp->sink_count;
5301         bool ret;
5302
5303         /*
5304          * Clearing compliance test variables to allow capturing
5305          * of values for next automated test request.
5306          */
5307         memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
5308
5309         /*
5310          * Now read the DPCD to see if it's actually running.
5311          * If the current value of sink count doesn't match with
5312          * the value that was stored earlier or the DPCD read failed,
5313          * we need to do full detection.
5314          */
5315         ret = intel_dp_get_dpcd(intel_dp);
5316
5317         if ((old_sink_count != intel_dp->sink_count) || !ret) {
5318                 /* No need to proceed if we are going to do full detect */
5319                 return false;
5320         }
5321
5322         intel_dp_check_service_irq(intel_dp);
5323
5324         /* Handle CEC interrupts, if any */
5325         drm_dp_cec_irq(&intel_dp->aux);
5326
5327         /* defer to the hotplug work for link retraining if needed */
5328         if (intel_dp_needs_link_retrain(intel_dp))
5329                 return false;
5330
5331         intel_psr_short_pulse(intel_dp);
5332
5333         if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
5334                 drm_dbg_kms(&dev_priv->drm,
5335                             "Link Training Compliance Test requested\n");
5336                 /* Send a Hotplug Uevent to userspace to start modeset */
5337                 drm_kms_helper_hotplug_event(&dev_priv->drm);
5338         }
5339
5340         return true;
5341 }
5342
5343 /* XXX this is probably wrong for multiple downstream ports */
5344 static enum drm_connector_status
5345 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
5346 {
5347         struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
5348         u8 *dpcd = intel_dp->dpcd;
5349         u8 type;
5350
5351         if (WARN_ON(intel_dp_is_edp(intel_dp)))
5352                 return connector_status_connected;
5353
5354         if (lspcon->active)
5355                 lspcon_resume(lspcon);
5356
5357         if (!intel_dp_get_dpcd(intel_dp))
5358                 return connector_status_disconnected;
5359
5360         /* if there's no downstream port, we're done */
5361         if (!drm_dp_is_branch(dpcd))
5362                 return connector_status_connected;
5363
5364         /* If we're HPD-aware, SINK_COUNT changes dynamically */
5365         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
5366             intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
5368                 return intel_dp->sink_count ?
5369                         connector_status_connected : connector_status_disconnected;
5370         }
5371
5372         if (intel_dp_can_mst(intel_dp))
5373                 return connector_status_connected;
5374
5375         /* If no HPD, poke DDC gently */
5376         if (drm_probe_ddc(&intel_dp->aux.ddc))
5377                 return connector_status_connected;
5378
5379         /* Well we tried, say unknown for unreliable port types */
5380         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
5381                 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
5382                 if (type == DP_DS_PORT_TYPE_VGA ||
5383                     type == DP_DS_PORT_TYPE_NON_EDID)
5384                         return connector_status_unknown;
5385         } else {
5386                 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
5387                         DP_DWN_STRM_PORT_TYPE_MASK;
5388                 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
5389                     type == DP_DWN_STRM_PORT_TYPE_OTHER)
5390                         return connector_status_unknown;
5391         }
5392
5393         /* Anything else is out of spec, warn and ignore */
5394         DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
5395         return connector_status_disconnected;
5396 }
5397
5398 static enum drm_connector_status
5399 edp_detect(struct intel_dp *intel_dp)
5400 {
5401         return connector_status_connected;
5402 }
5403
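/*
 * The *_digital_port_connected() helpers below sample the live HPD state
 * for the port from the platform's hotplug status registers.
 */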
5404 static bool ibx_digital_port_connected(struct intel_encoder *encoder)
5405 {
5406         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5407         u32 bit;
5408
5409         switch (encoder->hpd_pin) {
5410         case HPD_PORT_B:
5411                 bit = SDE_PORTB_HOTPLUG;
5412                 break;
5413         case HPD_PORT_C:
5414                 bit = SDE_PORTC_HOTPLUG;
5415                 break;
5416         case HPD_PORT_D:
5417                 bit = SDE_PORTD_HOTPLUG;
5418                 break;
5419         default:
5420                 MISSING_CASE(encoder->hpd_pin);
5421                 return false;
5422         }
5423
5424         return intel_de_read(dev_priv, SDEISR) & bit;
5425 }
5426
5427 static bool cpt_digital_port_connected(struct intel_encoder *encoder)
5428 {
5429         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5430         u32 bit;
5431
5432         switch (encoder->hpd_pin) {
5433         case HPD_PORT_B:
5434                 bit = SDE_PORTB_HOTPLUG_CPT;
5435                 break;
5436         case HPD_PORT_C:
5437                 bit = SDE_PORTC_HOTPLUG_CPT;
5438                 break;
5439         case HPD_PORT_D:
5440                 bit = SDE_PORTD_HOTPLUG_CPT;
5441                 break;
5442         default:
5443                 MISSING_CASE(encoder->hpd_pin);
5444                 return false;
5445         }
5446
5447         return intel_de_read(dev_priv, SDEISR) & bit;
5448 }
5449
5450 static bool spt_digital_port_connected(struct intel_encoder *encoder)
5451 {
5452         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5453         u32 bit;
5454
5455         switch (encoder->hpd_pin) {
5456         case HPD_PORT_A:
5457                 bit = SDE_PORTA_HOTPLUG_SPT;
5458                 break;
5459         case HPD_PORT_E:
5460                 bit = SDE_PORTE_HOTPLUG_SPT;
5461                 break;
5462         default:
5463                 return cpt_digital_port_connected(encoder);
5464         }
5465
5466         return intel_de_read(dev_priv, SDEISR) & bit;
5467 }
5468
5469 static bool g4x_digital_port_connected(struct intel_encoder *encoder)
5470 {
5471         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5472         u32 bit;
5473
5474         switch (encoder->hpd_pin) {
5475         case HPD_PORT_B:
5476                 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
5477                 break;
5478         case HPD_PORT_C:
5479                 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
5480                 break;
5481         case HPD_PORT_D:
5482                 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
5483                 break;
5484         default:
5485                 MISSING_CASE(encoder->hpd_pin);
5486                 return false;
5487         }
5488
5489         return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
5490 }
5491
5492 static bool gm45_digital_port_connected(struct intel_encoder *encoder)
5493 {
5494         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5495         u32 bit;
5496
5497         switch (encoder->hpd_pin) {
5498         case HPD_PORT_B:
5499                 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
5500                 break;
5501         case HPD_PORT_C:
5502                 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
5503                 break;
5504         case HPD_PORT_D:
5505                 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
5506                 break;
5507         default:
5508                 MISSING_CASE(encoder->hpd_pin);
5509                 return false;
5510         }
5511
5512         return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
5513 }
5514
5515 static bool ilk_digital_port_connected(struct intel_encoder *encoder)
5516 {
5517         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5518
5519         if (encoder->hpd_pin == HPD_PORT_A)
5520                 return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG;
5521         else
5522                 return ibx_digital_port_connected(encoder);
5523 }
5524
5525 static bool snb_digital_port_connected(struct intel_encoder *encoder)
5526 {
5527         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5528
5529         if (encoder->hpd_pin == HPD_PORT_A)
5530                 return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG;
5531         else
5532                 return cpt_digital_port_connected(encoder);
5533 }
5534
5535 static bool ivb_digital_port_connected(struct intel_encoder *encoder)
5536 {
5537         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5538
5539         if (encoder->hpd_pin == HPD_PORT_A)
5540                 return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG_IVB;
5541         else
5542                 return cpt_digital_port_connected(encoder);
5543 }
5544
5545 static bool bdw_digital_port_connected(struct intel_encoder *encoder)
5546 {
5547         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5548
5549         if (encoder->hpd_pin == HPD_PORT_A)
5550                 return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
5551         else
5552                 return cpt_digital_port_connected(encoder);
5553 }
5554
5555 static bool bxt_digital_port_connected(struct intel_encoder *encoder)
5556 {
5557         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5558         u32 bit;
5559
5560         switch (encoder->hpd_pin) {
5561         case HPD_PORT_A:
5562                 bit = BXT_DE_PORT_HP_DDIA;
5563                 break;
5564         case HPD_PORT_B:
5565                 bit = BXT_DE_PORT_HP_DDIB;
5566                 break;
5567         case HPD_PORT_C:
5568                 bit = BXT_DE_PORT_HP_DDIC;
5569                 break;
5570         default:
5571                 MISSING_CASE(encoder->hpd_pin);
5572                 return false;
5573         }
5574
5575         return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
5576 }
5577
5578 static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv,
5579                                       enum phy phy)
5580 {
5581         if (HAS_PCH_MCC(dev_priv) && phy == PHY_C)
5582                 return intel_de_read(dev_priv, SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1);
5583
5584         return intel_de_read(dev_priv, SDEISR) & SDE_DDI_HOTPLUG_ICP(phy);
5585 }
5586
5587 static bool icp_digital_port_connected(struct intel_encoder *encoder)
5588 {
5589         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5590         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
5591         enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
5592
5593         if (intel_phy_is_combo(dev_priv, phy))
5594                 return intel_combo_phy_connected(dev_priv, phy);
5595         else if (intel_phy_is_tc(dev_priv, phy))
5596                 return intel_tc_port_connected(dig_port);
5597         else
5598                 MISSING_CASE(encoder->hpd_pin);
5599
5600         return false;
5601 }
5602
5603 /*
5604  * intel_digital_port_connected - is the specified port connected?
5605  * @encoder: intel_encoder
5606  *
5607  * In cases where there's a connector physically connected but it can't be used
5608  * by our hardware, we also return false, since the rest of the driver should
5609  * pretty much treat the port as disconnected. This is relevant for type-C
5610  * (starting on ICL) where there's ownership involved.
5611  *
5612  * Return %true if the port is connected, %false otherwise.
5613  */
5614 static bool __intel_digital_port_connected(struct intel_encoder *encoder)
5615 {
5616         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5617
5618         if (HAS_GMCH(dev_priv)) {
5619                 if (IS_GM45(dev_priv))
5620                         return gm45_digital_port_connected(encoder);
5621                 else
5622                         return g4x_digital_port_connected(encoder);
5623         }
5624
5625         if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
5626                 return icp_digital_port_connected(encoder);
5627         else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
5628                 return spt_digital_port_connected(encoder);
5629         else if (IS_GEN9_LP(dev_priv))
5630                 return bxt_digital_port_connected(encoder);
5631         else if (IS_GEN(dev_priv, 8))
5632                 return bdw_digital_port_connected(encoder);
5633         else if (IS_GEN(dev_priv, 7))
5634                 return ivb_digital_port_connected(encoder);
5635         else if (IS_GEN(dev_priv, 6))
5636                 return snb_digital_port_connected(encoder);
5637         else if (IS_GEN(dev_priv, 5))
5638                 return ilk_digital_port_connected(encoder);
5639
5640         MISSING_CASE(INTEL_GEN(dev_priv));
5641         return false;
5642 }
5643
5644 bool intel_digital_port_connected(struct intel_encoder *encoder)
5645 {
5646         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5647         bool is_connected = false;
5648         intel_wakeref_t wakeref;
5649
5650         with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
5651                 is_connected = __intel_digital_port_connected(encoder);
5652
5653         return is_connected;
5654 }
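
/*
 * Editorial sketch, not driver code: the with_intel_display_power() guard
 * used above is roughly equivalent to this explicit get/put pairing (the
 * real macro may release the reference asynchronously), assuming the
 * wakeref-based display power API used elsewhere in this file.
 */
static inline bool
example_digital_port_connected_explicit(struct intel_encoder *encoder)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        intel_wakeref_t wakeref;
        bool is_connected;

        wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
        is_connected = __intel_digital_port_connected(encoder);
        intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

        return is_connected;
}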
5655
5656 static struct edid *
5657 intel_dp_get_edid(struct intel_dp *intel_dp)
5658 {
5659         struct intel_connector *intel_connector = intel_dp->attached_connector;
5660
5661         /* use cached edid if we have one */
5662         if (intel_connector->edid) {
5663                 /* invalid edid */
5664                 if (IS_ERR(intel_connector->edid))
5665                         return NULL;
5666
5667                 return drm_edid_duplicate(intel_connector->edid);
5668         } else
5669                 return drm_get_edid(&intel_connector->base,
5670                                     &intel_dp->aux.ddc);
5671 }
5672
5673 static void
5674 intel_dp_set_edid(struct intel_dp *intel_dp)
5675 {
5676         struct intel_connector *intel_connector = intel_dp->attached_connector;
5677         struct edid *edid;
5678
5679         intel_dp_unset_edid(intel_dp);
5680         edid = intel_dp_get_edid(intel_dp);
5681         intel_connector->detect_edid = edid;
5682
5683         intel_dp->has_audio = drm_detect_monitor_audio(edid);
5684         drm_dp_cec_set_edid(&intel_dp->aux, edid);
5685 }
5686
5687 static void
5688 intel_dp_unset_edid(struct intel_dp *intel_dp)
5689 {
5690         struct intel_connector *intel_connector = intel_dp->attached_connector;
5691
5692         drm_dp_cec_unset_edid(&intel_dp->aux);
5693         kfree(intel_connector->detect_edid);
5694         intel_connector->detect_edid = NULL;
5695
5696         intel_dp->has_audio = false;
5697 }
5698
5699 static int
5700 intel_dp_detect(struct drm_connector *connector,
5701                 struct drm_modeset_acquire_ctx *ctx,
5702                 bool force)
5703 {
5704         struct drm_i915_private *dev_priv = to_i915(connector->dev);
5705         struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
5706         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5707         struct intel_encoder *encoder = &dig_port->base;
5708         enum drm_connector_status status;
5709
5710         drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
5711                     connector->base.id, connector->name);
5712         drm_WARN_ON(&dev_priv->drm,
5713                     !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
5714
5715         /* Can't disconnect eDP */
5716         if (intel_dp_is_edp(intel_dp))
5717                 status = edp_detect(intel_dp);
5718         else if (intel_digital_port_connected(encoder))
5719                 status = intel_dp_detect_dpcd(intel_dp);
5720         else
5721                 status = connector_status_disconnected;
5722
5723         if (status == connector_status_disconnected) {
5724                 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
5725                 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
5726
5727                 if (intel_dp->is_mst) {
5728                         drm_dbg_kms(&dev_priv->drm,
5729                                     "MST device may have disappeared %d vs %d\n",
5730                                     intel_dp->is_mst,
5731                                     intel_dp->mst_mgr.mst_state);
5732                         intel_dp->is_mst = false;
5733                         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5734                                                         intel_dp->is_mst);
5735                 }
5736
5737                 goto out;
5738         }
5739
5740         if (intel_dp->reset_link_params) {
5741                 /* Initial max link lane count */
5742                 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
5743
5744                 /* Initial max link rate */
5745                 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
5746
5747                 intel_dp->reset_link_params = false;
5748         }
5749
5750         intel_dp_print_rates(intel_dp);
5751
5752         /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
5753         if (INTEL_GEN(dev_priv) >= 11)
5754                 intel_dp_get_dsc_sink_cap(intel_dp);
5755
5756         intel_dp_configure_mst(intel_dp);
5757
5758         if (intel_dp->is_mst) {
5759                 /*
5760                  * If we are in MST mode then this connector
5761                  * won't appear connected or have any
5762                  * EDID on it.
5763                  */
5764                 status = connector_status_disconnected;
5765                 goto out;
5766         }
5767
5768         /*
5769          * Some external monitors do not signal loss of link synchronization
5770          * with an IRQ_HPD, so force a link status check.
5771          */
5772         if (!intel_dp_is_edp(intel_dp)) {
5773                 int ret;
5774
5775                 ret = intel_dp_retrain_link(encoder, ctx);
5776                 if (ret)
5777                         return ret;
5778         }
5779
5780         /*
5781          * Clear the NACK and defer counts so that we get their exact
5782          * values for the EDID read, as required by compliance tests
5783          * 4.2.2.4 and 4.2.2.5.
5784          */
5785         intel_dp->aux.i2c_nack_count = 0;
5786         intel_dp->aux.i2c_defer_count = 0;
5787
5788         intel_dp_set_edid(intel_dp);
5789         if (intel_dp_is_edp(intel_dp) ||
5790             to_intel_connector(connector)->detect_edid)
5791                 status = connector_status_connected;
5792
5793         intel_dp_check_service_irq(intel_dp);
5794
5795 out:
5796         if (status != connector_status_connected && !intel_dp->is_mst)
5797                 intel_dp_unset_edid(intel_dp);
5798
5799         /*
5800          * Make sure the refs for power wells enabled during detect are
5801          * dropped to avoid a new detect cycle triggered by HPD polling.
5802          */
5803         intel_display_power_flush_work(dev_priv);
5804
5805         return status;
5806 }
5807
5808 static void
5809 intel_dp_force(struct drm_connector *connector)
5810 {
5811         struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
5812         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5813         struct intel_encoder *intel_encoder = &dig_port->base;
5814         struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
5815         enum intel_display_power_domain aux_domain =
5816                 intel_aux_power_domain(dig_port);
5817         intel_wakeref_t wakeref;
5818
5819         drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
5820                     connector->base.id, connector->name);
5821         intel_dp_unset_edid(intel_dp);
5822
5823         if (connector->status != connector_status_connected)
5824                 return;
5825
5826         wakeref = intel_display_power_get(dev_priv, aux_domain);
5827
5828         intel_dp_set_edid(intel_dp);
5829
5830         intel_display_power_put(dev_priv, aux_domain, wakeref);
5831 }
5832
5833 static int intel_dp_get_modes(struct drm_connector *connector)
5834 {
5835         struct intel_connector *intel_connector = to_intel_connector(connector);
5836         struct edid *edid;
5837
5838         edid = intel_connector->detect_edid;
5839         if (edid) {
5840                 int ret = intel_connector_update_modes(connector, edid);
5841                 if (ret)
5842                         return ret;
5843         }
5844
5845         /* if eDP has no EDID, fall back to fixed mode */
5846         if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) &&
5847             intel_connector->panel.fixed_mode) {
5848                 struct drm_display_mode *mode;
5849
5850                 mode = drm_mode_duplicate(connector->dev,
5851                                           intel_connector->panel.fixed_mode);
5852                 if (mode) {
5853                         drm_mode_probed_add(connector, mode);
5854                         return 1;
5855                 }
5856         }
5857
5858         return 0;
5859 }
5860
5861 static int
5862 intel_dp_connector_register(struct drm_connector *connector)
5863 {
5864         struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
5865         int ret;
5866
5867         ret = intel_connector_register(connector);
5868         if (ret)
5869                 return ret;
5870
5871         intel_connector_debugfs_add(connector);
5872
5873         DRM_DEBUG_KMS("registering %s bus for %s\n",
5874                       intel_dp->aux.name, connector->kdev->kobj.name);
5875
5876         intel_dp->aux.dev = connector->kdev;
5877         ret = drm_dp_aux_register(&intel_dp->aux);
5878         if (!ret)
5879                 drm_dp_cec_register_connector(&intel_dp->aux, connector);
5880         return ret;
5881 }
5882
5883 static void
5884 intel_dp_connector_unregister(struct drm_connector *connector)
5885 {
5886         struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
5887
5888         drm_dp_cec_unregister_connector(&intel_dp->aux);
5889         drm_dp_aux_unregister(&intel_dp->aux);
5890         intel_connector_unregister(connector);
5891 }
5892
5893 void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
5894 {
5895         struct intel_digital_port *intel_dig_port = enc_to_dig_port(to_intel_encoder(encoder));
5896         struct intel_dp *intel_dp = &intel_dig_port->dp;
5897
5898         intel_dp_mst_encoder_cleanup(intel_dig_port);
5899         if (intel_dp_is_edp(intel_dp)) {
5900                 intel_wakeref_t wakeref;
5901
5902                 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5903                 /*
5904                  * vdd might still be enabled due to the delayed vdd off.
5905                  * Make sure vdd is actually turned off here.
5906                  */
5907                 with_pps_lock(intel_dp, wakeref)
5908                         edp_panel_vdd_off_sync(intel_dp);
5909
5910                 if (intel_dp->edp_notifier.notifier_call) {
5911                         unregister_reboot_notifier(&intel_dp->edp_notifier);
5912                         intel_dp->edp_notifier.notifier_call = NULL;
5913                 }
5914         }
5915
5916         intel_dp_aux_fini(intel_dp);
5917 }
5918
5919 static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5920 {
5921         intel_dp_encoder_flush_work(encoder);
5922
5923         drm_encoder_cleanup(encoder);
5924         kfree(enc_to_dig_port(to_intel_encoder(encoder)));
5925 }
5926
5927 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5928 {
5929         struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
5930         intel_wakeref_t wakeref;
5931
5932         if (!intel_dp_is_edp(intel_dp))
5933                 return;
5934
5935         /*
5936          * vdd might still be enabled due to the delayed vdd off.
5937          * Make sure vdd is actually turned off here.
5938          */
5939         cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5940         with_pps_lock(intel_dp, wakeref)
5941                 edp_panel_vdd_off_sync(intel_dp);
5942 }
5943
5944 static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
5945 {
5946         long ret;
5947
5948 #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
5949         ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
5950                                                msecs_to_jiffies(timeout));
5951
5952         if (!ret)
5953                 DRM_DEBUG_KMS("Timed out waiting for CP_IRQ\n");
5954 }
5955
5956 static
5957 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
5958                                 u8 *an)
5959 {
5960         struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base));
5961         static const struct drm_dp_aux_msg msg = {
5962                 .request = DP_AUX_NATIVE_WRITE,
5963                 .address = DP_AUX_HDCP_AKSV,
5964                 .size = DRM_HDCP_KSV_LEN,
5965         };
5966         u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
5967         ssize_t dpcd_ret;
5968         int ret;
5969
5970         /* Output An first, that's easy */
5971         dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
5972                                      an, DRM_HDCP_AN_LEN);
5973         if (dpcd_ret != DRM_HDCP_AN_LEN) {
5974                 DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
5975                               dpcd_ret);
5976                 return dpcd_ret >= 0 ? -EIO : dpcd_ret;
5977         }
5978
5979         /*
5980          * Since Aksv is Oh-So-Secret, we can't access it in software. So in
5981          * order to get it on the wire, we need to create the AUX header as if
5982          * we were writing the data, and then tickle the hardware to output the
5983          * data once the header is sent out.
5984          */
5985         intel_dp_aux_header(txbuf, &msg);
5986
5987         ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
5988                                 rxbuf, sizeof(rxbuf),
5989                                 DP_AUX_CH_CTL_AUX_AKSV_SELECT);
5990         if (ret < 0) {
5991                 DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
5992                 return ret;
5993         } else if (ret == 0) {
5994                 DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
5995                 return -EIO;
5996         }
5997
5998         reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
5999         if (reply != DP_AUX_NATIVE_REPLY_ACK) {
6000                 DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
6001                               reply);
6002                 return -EIO;
6003         }
6004         return 0;
6005 }
6006
6007 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
6008                                    u8 *bksv)
6009 {
6010         ssize_t ret;
6011         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
6012                                DRM_HDCP_KSV_LEN);
6013         if (ret != DRM_HDCP_KSV_LEN) {
6014                 DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
6015                 return ret >= 0 ? -EIO : ret;
6016         }
6017         return 0;
6018 }
6019
6020 static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
6021                                       u8 *bstatus)
6022 {
6023         ssize_t ret;
6024         /*
6025          * For some reason the HDMI and DP HDCP specs call this register
6026          * definition by different names. In the HDMI spec, it's called BSTATUS,
6027          * but in DP it's called BINFO.
6028          */
6029         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
6030                                bstatus, DRM_HDCP_BSTATUS_LEN);
6031         if (ret != DRM_HDCP_BSTATUS_LEN) {
6032                 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
6033                 return ret >= 0 ? -EIO : ret;
6034         }
6035         return 0;
6036 }
6037
6038 static
6039 int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
6040                              u8 *bcaps)
6041 {
6042         ssize_t ret;
6043
6044         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
6045                                bcaps, 1);
6046         if (ret != 1) {
6047                 DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
6048                 return ret >= 0 ? -EIO : ret;
6049         }
6050
6051         return 0;
6052 }
6053
6054 static
6055 int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
6056                                    bool *repeater_present)
6057 {
6058         ssize_t ret;
6059         u8 bcaps;
6060
6061         ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
6062         if (ret)
6063                 return ret;
6064
6065         *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
6066         return 0;
6067 }
6068
6069 static
6070 int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
6071                                 u8 *ri_prime)
6072 {
6073         ssize_t ret;
6074         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
6075                                ri_prime, DRM_HDCP_RI_LEN);
6076         if (ret != DRM_HDCP_RI_LEN) {
6077                 DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
6078                 return ret >= 0 ? -EIO : ret;
6079         }
6080         return 0;
6081 }
6082
6083 static
6084 int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
6085                                  bool *ksv_ready)
6086 {
6087         ssize_t ret;
6088         u8 bstatus;
6089         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
6090                                &bstatus, 1);
6091         if (ret != 1) {
6092                 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
6093                 return ret >= 0 ? -EIO : ret;
6094         }
6095         *ksv_ready = bstatus & DP_BSTATUS_READY;
6096         return 0;
6097 }
6098
6099 static
6100 int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
6101                                 int num_downstream, u8 *ksv_fifo)
6102 {
6103         ssize_t ret;
6104         int i;
6105
6106         /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
6107         for (i = 0; i < num_downstream; i += 3) {
6108                 size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
6109                 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6110                                        DP_AUX_HDCP_KSV_FIFO,
6111                                        ksv_fifo + i * DRM_HDCP_KSV_LEN,
6112                                        len);
6113                 if (ret != len) {
6114                         DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
6115                                       i, ret);
6116                         return ret >= 0 ? -EIO : ret;
6117                 }
6118         }
6119         return 0;
6120 }
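
/*
 * Editorial sketch, not driver code: the per-window length math used by
 * the KSV read loop above, factored out for illustration. For example,
 * num_downstream == 7 yields reads of 15, 15 and 5 bytes (3 + 3 + 1 KSVs
 * at DRM_HDCP_KSV_LEN == 5 bytes each).
 */
static inline size_t hdcp_ksv_fifo_chunk_len(int num_downstream, int i)
{
        return min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
}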
6121
6122 static
6123 int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
6124                                     int i, u32 *part)
6125 {
6126         ssize_t ret;
6127
6128         if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
6129                 return -EINVAL;
6130
6131         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6132                                DP_AUX_HDCP_V_PRIME(i), part,
6133                                DRM_HDCP_V_PRIME_PART_LEN);
6134         if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
6135                 DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
6136                 return ret >= 0 ? -EIO : ret;
6137         }
6138         return 0;
6139 }
6140
6141 static
6142 int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
6143                                     bool enable)
6144 {
6145         /* Not used for single stream DisplayPort setups */
6146         return 0;
6147 }
6148
6149 static
6150 bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
6151 {
6152         ssize_t ret;
6153         u8 bstatus;
6154
6155         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
6156                                &bstatus, 1);
6157         if (ret != 1) {
6158                 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
6159                 return false;
6160         }
6161
6162         return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
6163 }
6164
6165 static
6166 int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
6167                           bool *hdcp_capable)
6168 {
6169         ssize_t ret;
6170         u8 bcaps;
6171
6172         ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
6173         if (ret)
6174                 return ret;
6175
6176         *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
6177         return 0;
6178 }
6179
6180 struct hdcp2_dp_errata_stream_type {
6181         u8      msg_id;
6182         u8      stream_type;
6183 } __packed;
6184
6185 struct hdcp2_dp_msg_data {
6186         u8 msg_id;
6187         u32 offset;
6188         bool msg_detectable;
6189         u32 timeout;
6190         u32 timeout2; /* Added for the non-paired situation */
6191 };
6192
6193 static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
6194         { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 },
6195         { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
6196           false, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
6197         { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
6198           false, 0, 0 },
6199         { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
6200           false, 0, 0 },
6201         { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
6202           true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
6203           HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
6204         { HDCP_2_2_AKE_SEND_PAIRING_INFO,
6205           DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
6206           HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
6207         { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 },
6208         { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
6209           false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 },
6210         { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
6211           0, 0 },
6212         { HDCP_2_2_REP_SEND_RECVID_LIST,
6213           DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
6214           HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
6215         { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
6216           0, 0 },
6217         { HDCP_2_2_REP_STREAM_MANAGE,
6218           DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
6219           0, 0 },
6220         { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
6221           false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
6222 /* local define to shovel this through the write_2_2 interface */
6223 #define HDCP_2_2_ERRATA_DP_STREAM_TYPE  50
6224         { HDCP_2_2_ERRATA_DP_STREAM_TYPE,
6225           DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
6226           0, 0 },
6227 };
6228
6229 static inline
6230 int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
6231                                   u8 *rx_status)
6232 {
6233         ssize_t ret;
6234
6235         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6236                                DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
6237                                HDCP_2_2_DP_RXSTATUS_LEN);
6238         if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
6239                 DRM_DEBUG_KMS("Read rx_status from DP/AUX failed (%zd)\n", ret);
6240                 return ret >= 0 ? -EIO : ret;
6241         }
6242
6243         return 0;
6244 }
6245
6246 static
6247 int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
6248                                   u8 msg_id, bool *msg_ready)
6249 {
6250         u8 rx_status;
6251         int ret;
6252
6253         *msg_ready = false;
6254         ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
6255         if (ret < 0)
6256                 return ret;
6257
6258         switch (msg_id) {
6259         case HDCP_2_2_AKE_SEND_HPRIME:
6260                 if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
6261                         *msg_ready = true;
6262                 break;
6263         case HDCP_2_2_AKE_SEND_PAIRING_INFO:
6264                 if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
6265                         *msg_ready = true;
6266                 break;
6267         case HDCP_2_2_REP_SEND_RECVID_LIST:
6268                 if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
6269                         *msg_ready = true;
6270                 break;
6271         default:
6272                 DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
6273                 return -EINVAL;
6274         }
6275
6276         return 0;
6277 }
6278
6279 static ssize_t
6280 intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
6281                             const struct hdcp2_dp_msg_data *hdcp2_msg_data)
6282 {
6283         struct intel_dp *dp = &intel_dig_port->dp;
6284         struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
6285         u8 msg_id = hdcp2_msg_data->msg_id;
6286         int ret, timeout;
6287         bool msg_ready = false;
6288
6289         if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
6290                 timeout = hdcp2_msg_data->timeout2;
6291         else
6292                 timeout = hdcp2_msg_data->timeout;
6293
6294         /*
6295          * There is no way to detect the CERT, LPRIME and STREAM_READY
6296          * availability, so wait for the timeout and then read the msg.
6297          */
6298         if (!hdcp2_msg_data->msg_detectable) {
6299                 mdelay(timeout);
6300                 ret = 0;
6301         } else {
6302                 /*
6303                  * Since we want to check msg availability at the timeout,
6304                  * ignore the timeout status of the CP_IRQ wait itself.
6305                  */
6306                 intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
6307                 ret = hdcp2_detect_msg_availability(intel_dig_port,
6308                                                     msg_id, &msg_ready);
6309                 if (!msg_ready)
6310                         ret = -ETIMEDOUT;
6311         }
6312
6313         if (ret)
6314                 DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
6315                               hdcp2_msg_data->msg_id, ret, timeout);
6316
6317         return ret;
6318 }
6319
6320 static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
6321 {
6322         int i;
6323
6324         for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++)
6325                 if (hdcp2_dp_msg_data[i].msg_id == msg_id)
6326                         return &hdcp2_dp_msg_data[i];
6327
6328         return NULL;
6329 }
6330
6331 static
6332 int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
6333                              void *buf, size_t size)
6334 {
6335         struct intel_dp *dp = &intel_dig_port->dp;
6336         struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
6337         unsigned int offset;
6338         u8 *byte = buf;
6339         ssize_t ret, bytes_to_write, len;
6340         const struct hdcp2_dp_msg_data *hdcp2_msg_data;
6341
6342         hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
6343         if (!hdcp2_msg_data)
6344                 return -EINVAL;
6345
6346         offset = hdcp2_msg_data->offset;
6347
6348         /* No msg_id in DP HDCP2.2 msgs */
6349         bytes_to_write = size - 1;
6350         byte++;
6351
6352         hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
6353
6354         while (bytes_to_write) {
6355                 len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
6356                                 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
6357
6358                 ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
6359                                         offset, (void *)byte, len);
6360                 if (ret < 0)
6361                         return ret;
6362
6363                 bytes_to_write -= ret;
6364                 byte += ret;
6365                 offset += ret;
6366         }
6367
6368         return size;
6369 }
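
/*
 * Editorial sketch, not driver code: the number of AUX writes the loop
 * above issues for a given message body. Payloads are capped at
 * DP_AUX_MAX_PAYLOAD_BYTES (16), so e.g. a 100 byte body (size - 1, the
 * msg_id byte having been dropped) goes out as six full chunks plus a
 * 4 byte tail, i.e. seven writes.
 */
static inline int hdcp2_aux_write_chunks(ssize_t bytes_to_write)
{
        return DIV_ROUND_UP(bytes_to_write, DP_AUX_MAX_PAYLOAD_BYTES);
}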
6370
6371 static
6372 ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
6373 {
6374         u8 rx_info[HDCP_2_2_RXINFO_LEN];
6375         u32 dev_cnt;
6376         ssize_t ret;
6377
6378         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6379                                DP_HDCP_2_2_REG_RXINFO_OFFSET,
6380                                (void *)rx_info, HDCP_2_2_RXINFO_LEN);
6381         if (ret != HDCP_2_2_RXINFO_LEN)
6382                 return ret >= 0 ? -EIO : ret;
6383
6384         dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
6385                    HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
6386
6387         if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
6388                 dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
6389
6390         ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
6391                 HDCP_2_2_RECEIVER_IDS_MAX_LEN +
6392                 (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
6393
6394         return ret;
6395 }
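
/*
 * Editorial sketch, not driver code: the value computed above is just the
 * fixed part of the RepeaterAuth_Send_ReceiverID_List message plus one
 * receiver ID per reported downstream device, capped at
 * HDCP_2_2_MAX_DEVICE_COUNT.
 */
static inline ssize_t hdcp2_recvid_list_size(u32 dev_cnt)
{
        return sizeof(struct hdcp2_rep_send_receiverid_list) -
               HDCP_2_2_RECEIVER_IDS_MAX_LEN +
               dev_cnt * HDCP_2_2_RECEIVER_ID_LEN;
}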
6396
6397 static
6398 int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
6399                             u8 msg_id, void *buf, size_t size)
6400 {
6401         unsigned int offset;
6402         u8 *byte = buf;
6403         ssize_t ret, bytes_to_recv, len;
6404         const struct hdcp2_dp_msg_data *hdcp2_msg_data;
6405
6406         hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
6407         if (!hdcp2_msg_data)
6408                 return -EINVAL;
6409         offset = hdcp2_msg_data->offset;
6410
6411         ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
6412         if (ret < 0)
6413                 return ret;
6414
6415         if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
6416                 ret = get_receiver_id_list_size(intel_dig_port);
6417                 if (ret < 0)
6418                         return ret;
6419
6420                 size = ret;
6421         }
6422         bytes_to_recv = size - 1;
6423
6424         /* DP adaptation msgs have no msg_id */
6425         byte++;
6426
6427         while (bytes_to_recv) {
6428                 len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
6429                       DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
6430
6431                 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
6432                                        (void *)byte, len);
6433                 if (ret < 0) {
6434                         DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
6435                         return ret;
6436                 }
6437
6438                 bytes_to_recv -= ret;
6439                 byte += ret;
6440                 offset += ret;
6441         }
6442         byte = buf;
6443         *byte = msg_id;
6444
6445         return size;
6446 }
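
/*
 * Editorial sketch, not driver code: DP HDCP2.2 registers carry no msg_id
 * on the wire, which is why the read loop above fills buf[1..size-1] from
 * the DPCD and then stitches the id back in as byte 0 for callers.
 */
static inline void hdcp2_restore_msg_id(u8 *buf, u8 msg_id)
{
        buf[0] = msg_id;
}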
6447
6448 static
6449 int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
6450                                       bool is_repeater, u8 content_type)
6451 {
6452         struct hdcp2_dp_errata_stream_type stream_type_msg;
6453
6454         if (is_repeater)
6455                 return 0;
6456
6457         /*
6458          * Errata for DP: since the stream type is used for encryption,
6459          * the receiver must be told the stream type so that it can
6460          * decrypt the content.
6461          * A repeater is told the stream type as part of its
6462          * authentication later on.
6463          */
6464         stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
6465         stream_type_msg.stream_type = content_type;
6466
6467         return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
6468                                         sizeof(stream_type_msg));
6469 }
6470
6471 static
6472 int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
6473 {
6474         u8 rx_status;
6475         int ret;
6476
6477         ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
6478         if (ret)
6479                 return ret;
6480
6481         if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
6482                 ret = HDCP_REAUTH_REQUEST;
6483         else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
6484                 ret = HDCP_LINK_INTEGRITY_FAILURE;
6485         else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
6486                 ret = HDCP_TOPOLOGY_CHANGE;
6487
6488         return ret;
6489 }
6490
6491 static
6492 int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
6493                            bool *capable)
6494 {
6495         u8 rx_caps[3];
6496         int ret;
6497
6498         *capable = false;
6499         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6500                                DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
6501                                rx_caps, HDCP_2_2_RXCAPS_LEN);
6502         if (ret != HDCP_2_2_RXCAPS_LEN)
6503                 return ret >= 0 ? -EIO : ret;
6504
6505         if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
6506             HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
6507                 *capable = true;
6508
6509         return 0;
6510 }
6511
6512 static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
6513         .write_an_aksv = intel_dp_hdcp_write_an_aksv,
6514         .read_bksv = intel_dp_hdcp_read_bksv,
6515         .read_bstatus = intel_dp_hdcp_read_bstatus,
6516         .repeater_present = intel_dp_hdcp_repeater_present,
6517         .read_ri_prime = intel_dp_hdcp_read_ri_prime,
6518         .read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
6519         .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
6520         .read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
6521         .toggle_signalling = intel_dp_hdcp_toggle_signalling,
6522         .check_link = intel_dp_hdcp_check_link,
6523         .hdcp_capable = intel_dp_hdcp_capable,
6524         .write_2_2_msg = intel_dp_hdcp2_write_msg,
6525         .read_2_2_msg = intel_dp_hdcp2_read_msg,
6526         .config_stream_type = intel_dp_hdcp2_config_stream_type,
6527         .check_2_2_link = intel_dp_hdcp2_check_link,
6528         .hdcp_2_2_capable = intel_dp_hdcp2_capable,
6529         .protocol = HDCP_PROTOCOL_DP,
6530 };
6531
6532 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
6533 {
6534         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6535         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
6536
6537         lockdep_assert_held(&dev_priv->pps_mutex);
6538
6539         if (!edp_have_panel_vdd(intel_dp))
6540                 return;
6541
6542         /*
6543          * The VDD bit needs a power domain reference, so if the bit is
6544          * already enabled when we boot or resume, grab this reference and
6545          * schedule a vdd off, so we don't hold on to the reference
6546          * indefinitely.
6547          */
6548         drm_dbg_kms(&dev_priv->drm,
6549                     "VDD left on by BIOS, adjusting state tracking\n");
6550         intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
6551
6552         edp_panel_vdd_schedule_off(intel_dp);
6553 }
6554
6555 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
6556 {
6557         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6558         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
6559         enum pipe pipe;
6560
6561         if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
6562                                   encoder->port, &pipe))
6563                 return pipe;
6564
6565         return INVALID_PIPE;
6566 }
6567
6568 void intel_dp_encoder_reset(struct drm_encoder *encoder)
6569 {
6570         struct drm_i915_private *dev_priv = to_i915(encoder->dev);
6571         struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
6572         struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
6573         intel_wakeref_t wakeref;
6574
6575         if (!HAS_DDI(dev_priv))
6576                 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
6577
6578         if (lspcon->active)
6579                 lspcon_resume(lspcon);
6580
6581         intel_dp->reset_link_params = true;
6582
6583         if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
6584             !intel_dp_is_edp(intel_dp))
6585                 return;
6586
6587         with_pps_lock(intel_dp, wakeref) {
6588                 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6589                         intel_dp->active_pipe = vlv_active_pipe(intel_dp);
6590
6591                 if (intel_dp_is_edp(intel_dp)) {
6592                         /*
6593                          * Reinit the power sequencer, in case BIOS did
6594                          * something nasty with it.
6595                          */
6596                         intel_dp_pps_init(intel_dp);
6597                         intel_edp_panel_vdd_sanitize(intel_dp);
6598                 }
6599         }
6600 }
6601
6602 static int intel_modeset_tile_group(struct intel_atomic_state *state,
6603                                     int tile_group_id)
6604 {
6605         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6606         struct drm_connector_list_iter conn_iter;
6607         struct drm_connector *connector;
6608         int ret = 0;
6609
6610         drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
6611         drm_for_each_connector_iter(connector, &conn_iter) {
6612                 struct drm_connector_state *conn_state;
6613                 struct intel_crtc_state *crtc_state;
6614                 struct intel_crtc *crtc;
6615
6616                 if (!connector->has_tile ||
6617                     connector->tile_group->id != tile_group_id)
6618                         continue;
6619
6620                 conn_state = drm_atomic_get_connector_state(&state->base,
6621                                                             connector);
6622                 if (IS_ERR(conn_state)) {
6623                         ret = PTR_ERR(conn_state);
6624                         break;
6625                 }
6626
6627                 crtc = to_intel_crtc(conn_state->crtc);
6628
6629                 if (!crtc)
6630                         continue;
6631
6632                 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
6633                 crtc_state->uapi.mode_changed = true;
6634
6635                 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
6636                 if (ret)
6637                         break;
6638         }
6639         drm_connector_list_iter_end(&conn_iter);
6640
6641         return ret;
6642 }
6643
6644 static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
6645 {
6646         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
6647         struct intel_crtc *crtc;
6648
6649         if (transcoders == 0)
6650                 return 0;
6651
6652         for_each_intel_crtc(&dev_priv->drm, crtc) {
6653                 struct intel_crtc_state *crtc_state;
6654                 int ret;
6655
6656                 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
6657                 if (IS_ERR(crtc_state))
6658                         return PTR_ERR(crtc_state);
6659
6660                 if (!crtc_state->hw.enable)
6661                         continue;
6662
6663                 if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
6664                         continue;
6665
6666                 crtc_state->uapi.mode_changed = true;
6667
6668                 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
6669                 if (ret)
6670                         return ret;
6671
6672                 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
6673                 if (ret)
6674                         return ret;
6675
6676                 transcoders &= ~BIT(crtc_state->cpu_transcoder);
6677         }
6678
6679         drm_WARN_ON(&dev_priv->drm, transcoders != 0);
6680
6681         return 0;
6682 }
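
/*
 * Editorial usage sketch with hypothetical values, not driver code:
 * callers pass a mask of cpu_transcoder bits; every enabled CRTC driving
 * one of them is flagged for a full modeset and its bit cleared, so any
 * bit still set at the end trips the WARN above.
 */
static inline int example_modeset_synced_pair(struct intel_atomic_state *state)
{
        u8 transcoders = BIT(TRANSCODER_A) | BIT(TRANSCODER_B);

        return intel_modeset_affected_transcoders(state, transcoders);
}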
6683
6684 static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
6685                                       struct drm_connector *connector)
6686 {
6687         const struct drm_connector_state *old_conn_state =
6688                 drm_atomic_get_old_connector_state(&state->base, connector);
6689         const struct intel_crtc_state *old_crtc_state;
6690         struct intel_crtc *crtc;
6691         u8 transcoders;
6692
6693         crtc = to_intel_crtc(old_conn_state->crtc);
6694         if (!crtc)
6695                 return 0;
6696
6697         old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
6698
6699         if (!old_crtc_state->hw.active)
6700                 return 0;
6701
6702         transcoders = old_crtc_state->sync_mode_slaves_mask;
6703         if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
6704                 transcoders |= BIT(old_crtc_state->master_transcoder);
6705
6706         return intel_modeset_affected_transcoders(state,
6707                                                   transcoders);
6708 }
6709
6710 static int intel_dp_connector_atomic_check(struct drm_connector *conn,
6711                                            struct drm_atomic_state *_state)
6712 {
6713         struct drm_i915_private *dev_priv = to_i915(conn->dev);
6714         struct intel_atomic_state *state = to_intel_atomic_state(_state);
6715         int ret;
6716
6717         ret = intel_digital_connector_atomic_check(conn, &state->base);
6718         if (ret)
6719                 return ret;
6720
6721         if (INTEL_GEN(dev_priv) < 11)
6722                 return 0;
6723
6724         if (!intel_connector_needs_modeset(state, conn))
6725                 return 0;
6726
6727         if (conn->has_tile) {
6728                 ret = intel_modeset_tile_group(state, conn->tile_group->id);
6729                 if (ret)
6730                         return ret;
6731         }
6732
6733         return intel_modeset_synced_crtcs(state, conn);
6734 }
6735
6736 static const struct drm_connector_funcs intel_dp_connector_funcs = {
6737         .force = intel_dp_force,
6738         .fill_modes = drm_helper_probe_single_connector_modes,
6739         .atomic_get_property = intel_digital_connector_atomic_get_property,
6740         .atomic_set_property = intel_digital_connector_atomic_set_property,
6741         .late_register = intel_dp_connector_register,
6742         .early_unregister = intel_dp_connector_unregister,
6743         .destroy = intel_connector_destroy,
6744         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6745         .atomic_duplicate_state = intel_digital_connector_duplicate_state,
6746 };
6747
6748 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
6749         .detect_ctx = intel_dp_detect,
6750         .get_modes = intel_dp_get_modes,
6751         .mode_valid = intel_dp_mode_valid,
6752         .atomic_check = intel_dp_connector_atomic_check,
6753 };
6754
6755 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6756         .reset = intel_dp_encoder_reset,
6757         .destroy = intel_dp_encoder_destroy,
6758 };
6759
6760 enum irqreturn
6761 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
6762 {
6763         struct intel_dp *intel_dp = &intel_dig_port->dp;
6764
6765         if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
6766                 /*
6767                  * vdd off can generate a long pulse on eDP which
6768                  * would require vdd on to handle it, and thus we
6769                  * would end up in an endless cycle of
6770                  * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
6771                  */
6772                 DRM_DEBUG_KMS("ignoring long hpd on eDP [ENCODER:%d:%s]\n",
6773                               intel_dig_port->base.base.base.id,
6774                               intel_dig_port->base.base.name);
6775                 return IRQ_HANDLED;
6776         }
6777
6778         DRM_DEBUG_KMS("got hpd irq on [ENCODER:%d:%s] - %s\n",
6779                       intel_dig_port->base.base.base.id,
6780                       intel_dig_port->base.base.name,
6781                       long_hpd ? "long" : "short");
6782
6783         if (long_hpd) {
6784                 intel_dp->reset_link_params = true;
6785                 return IRQ_NONE;
6786         }
6787
6788         if (intel_dp->is_mst) {
6789                 if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
6790                         /*
6791                          * If we were in MST mode, and device is not
6792                          * there, get out of MST mode
6793                          */
6794                         DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
6795                                       intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
6796                         intel_dp->is_mst = false;
6797                         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
6798                                                         intel_dp->is_mst);
6799
6800                         return IRQ_NONE;
6801                 }
6802         }
6803
6804         if (!intel_dp->is_mst) {
6805                 bool handled;
6806
6807                 handled = intel_dp_short_pulse(intel_dp);
6808
6809                 if (!handled)
6810                         return IRQ_NONE;
6811         }
6812
6813         return IRQ_HANDLED;
6814 }
6815
6816 /* Check the VBT to see whether the given port carries the eDP panel */
6817 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
6818 {
6819         /*
6820          * eDP is not supported on g4x, so bail out early just
6821          * for a bit of extra safety in case the VBT is bonkers.
6822          */
6823         if (INTEL_GEN(dev_priv) < 5)
6824                 return false;
6825
6826         if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
6827                 return true;
6828
6829         return intel_bios_is_port_edp(dev_priv, port);
6830 }
6831
6832 static void
6833 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
6834 {
6835         struct drm_i915_private *dev_priv = to_i915(connector->dev);
6836         enum port port = dp_to_dig_port(intel_dp)->base.port;
6837
6838         if (!IS_G4X(dev_priv) && port != PORT_A)
6839                 intel_attach_force_audio_property(connector);
6840
6841         intel_attach_broadcast_rgb_property(connector);
6842         if (HAS_GMCH(dev_priv))
6843                 drm_connector_attach_max_bpc_property(connector, 6, 10);
6844         else if (INTEL_GEN(dev_priv) >= 5)
6845                 drm_connector_attach_max_bpc_property(connector, 6, 12);
6846
6847         intel_attach_colorspace_property(connector);
6848
6849         if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
6850                 drm_object_attach_property(&connector->base,
6851                                            connector->dev->mode_config.hdr_output_metadata_property,
6852                                            0);
6853
6854         if (intel_dp_is_edp(intel_dp)) {
6855                 u32 allowed_scalers;
6856
6857                 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
6858                 if (!HAS_GMCH(dev_priv))
6859                         allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
6860
6861                 drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
6862
6863                 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
6864
6865         }
6866 }
6867
6868 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
6869 {
6870         intel_dp->panel_power_off_time = ktime_get_boottime();
6871         intel_dp->last_power_on = jiffies;
6872         intel_dp->last_backlight_off = jiffies;
6873 }
6874
6875 static void
6876 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
6877 {
6878         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6879         u32 pp_on, pp_off, pp_ctl;
6880         struct pps_registers regs;
6881
6882         intel_pps_get_registers(intel_dp, &regs);
6883
6884         pp_ctl = ilk_get_pp_control(intel_dp);
6885
6886         /* Ensure PPS is unlocked */
6887         if (!HAS_DDI(dev_priv))
6888                 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
6889
6890         pp_on = intel_de_read(dev_priv, regs.pp_on);
6891         pp_off = intel_de_read(dev_priv, regs.pp_off);
6892
6893         /* Pull timing values out of registers */
6894         seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
6895         seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
6896         seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
6897         seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
6898
6899         if (i915_mmio_reg_valid(regs.pp_div)) {
6900                 u32 pp_div;
6901
6902                 pp_div = intel_de_read(dev_priv, regs.pp_div);
6903
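                     /*
                      * The hw field is in 100ms units; scale it to the
                      * 100usec units used in struct edp_power_seq
                      * (e.g. a raw value of 5 -> 5000 -> 500ms).
                      */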
6904                 seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
6905         } else {
6906                 seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
6907         }
6908 }
6909
6910 static void
6911 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
6912 {
6913         DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
6914                       state_name,
6915                       seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
6916 }
6917
6918 static void
6919 intel_pps_verify_state(struct intel_dp *intel_dp)
6920 {
6921         struct edp_power_seq hw;
6922         struct edp_power_seq *sw = &intel_dp->pps_delays;
6923
6924         intel_pps_readout_hw_state(intel_dp, &hw);
6925
6926         if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
6927             hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
6928                 DRM_ERROR("PPS state mismatch\n");
6929                 intel_pps_dump_state("sw", sw);
6930                 intel_pps_dump_state("hw", &hw);
6931         }
6932 }
6933
6934 static void
6935 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
6936 {
6937         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6938         struct edp_power_seq cur, vbt, spec,
6939                 *final = &intel_dp->pps_delays;
6940
6941         lockdep_assert_held(&dev_priv->pps_mutex);
6942
6943         /* already initialized? */
6944         if (final->t11_t12 != 0)
6945                 return;
6946
6947         intel_pps_readout_hw_state(intel_dp, &cur);
6948
6949         intel_pps_dump_state("cur", &cur);
6950
6951         vbt = dev_priv->vbt.edp.pps;
6952         /* On the Toshiba Satellite P50-C-18C system the VBT T12 delay
6953          * of 500ms appears to be too short. Occasionally the panel
6954          * just fails to power back on. Enforcing a floor of 1300ms
6955          * (as done below) seems sufficient to avoid this problem.
6956          */
6957         if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
6958                 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
6959                 drm_dbg_kms(&dev_priv->drm,
6960                             "Increasing T12 panel delay as per the quirk to %d\n",
6961                             vbt.t11_t12);
6962         }
6963         /* The T11_T12 delay is special: in the hw it is in units of 100ms
6964          * and zero based (so we need to add 100ms). The sw vbt table has
6965          * already multiplied it by 1000, so here it is in units of
6966          * 100usec. */
6967         vbt.t11_t12 += 100 * 10;
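             /* e.g. a VBT-derived value of 5000 (500ms) becomes 6000 (600ms) */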
6968
6969         /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
6970          * our hw here, which are all in 100usec. */
6971         spec.t1_t3 = 210 * 10;
6972         spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
6973         spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
6974         spec.t10 = 500 * 10;
6975         /* This one is special too: in the hw it is in units of 100ms and
6976          * zero based (so we need to add 100ms), while here it uses the
6977          * same 100usec units that the sw vbt table produces for
6978          * t11_t12. */
6979         spec.t11_t12 = (510 + 100) * 10;
6980
6981         intel_pps_dump_state("vbt", &vbt);
6982
6983         /* Use the max of the register settings and vbt. If both are
6984          * unset, fall back to the spec limits. */
6985 #define assign_final(field)     final->field = (max(cur.field, vbt.field) == 0 ? \
6986                                        spec.field : \
6987                                        max(cur.field, vbt.field))
6988         assign_final(t1_t3);
6989         assign_final(t8);
6990         assign_final(t9);
6991         assign_final(t10);
6992         assign_final(t11_t12);
6993 #undef assign_final
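             /* e.g. with cur.t1_t3 and vbt.t1_t3 both zero, the spec limit of
              * 2100 (210ms) is used for the final value. */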
6994
6995 #define get_delay(field)        (DIV_ROUND_UP(final->field, 10))
6996         intel_dp->panel_power_up_delay = get_delay(t1_t3);
6997         intel_dp->backlight_on_delay = get_delay(t8);
6998         intel_dp->backlight_off_delay = get_delay(t9);
6999         intel_dp->panel_power_down_delay = get_delay(t10);
7000         intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
7001 #undef get_delay
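             /* get_delay() rounds the hw's 100usec units up to ms: e.g. a
              * final t1_t3 of 2100 yields a 210ms panel_power_up_delay. */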
7002
7003         drm_dbg_kms(&dev_priv->drm,
7004                     "panel power up delay %d, power down delay %d, power cycle delay %d\n",
7005                     intel_dp->panel_power_up_delay,
7006                     intel_dp->panel_power_down_delay,
7007                     intel_dp->panel_power_cycle_delay);
7008
7009         drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
7010                     intel_dp->backlight_on_delay,
7011                     intel_dp->backlight_off_delay);
7012
7013         /*
7014          * We override the HW backlight delays to 1 because we do manual waits
7015          * on them. For T8, even BSpec recommends doing it. For T9, if we
7016          * don't do this, we'll end up waiting for the backlight off delay
7017          * twice: once when we do the manual sleep, and once when we disable
7018          * the panel and wait for the PP_STATUS bit to become zero.
7019          */
7020         final->t8 = 1;
7021         final->t9 = 1;
7022
7023         /*
7024          * HW has only a 100msec granularity for t11_t12 so round it up
7025          * accordingly.
7026          */
7027         final->t11_t12 = roundup(final->t11_t12, 100 * 10);
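             /* e.g. the spec-derived 6100 (610ms) rounds up to 7000 (700ms) */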
7028 }
7029
7030 static void
7031 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
7032                                               bool force_disable_vdd)
7033 {
7034         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7035         u32 pp_on, pp_off, port_sel = 0;
7036         int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
7037         struct pps_registers regs;
7038         enum port port = dp_to_dig_port(intel_dp)->base.port;
7039         const struct edp_power_seq *seq = &intel_dp->pps_delays;
7040
7041         lockdep_assert_held(&dev_priv->pps_mutex);
7042
7043         intel_pps_get_registers(intel_dp, &regs);
7044
7045         /*
7046          * On some VLV machines the BIOS can leave the VDD
7047          * enabled even on power sequencers which aren't
7048          * hooked up to any port. This would mess up the
7049          * power domain tracking the first time we pick
7050          * one of these power sequencers for use since
7051          * edp_panel_vdd_on() would notice that the VDD was
7052          * already on and therefore wouldn't grab the power
7053          * domain reference. Disable VDD first to avoid this.
7054          * This also avoids spuriously turning the VDD on as
7055          * soon as the new power sequencer gets initialized.
7056          */
7057         if (force_disable_vdd) {
7058                 u32 pp = ilk_get_pp_control(intel_dp);
7059
7060                 drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
7061                          "Panel power already on\n");
7062
7063                 if (pp & EDP_FORCE_VDD)
7064                         drm_dbg_kms(&dev_priv->drm,
7065                                     "VDD already on, disabling first\n");
7066
7067                 pp &= ~EDP_FORCE_VDD;
7068
7069                 intel_de_write(dev_priv, regs.pp_ctrl, pp);
7070         }
7071
7072         pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
7073                 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
7074         pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
7075                 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
7076
7077         /* Haswell doesn't have any port selection bits for the panel
7078          * power sequencer any more. */
7079         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
7080                 port_sel = PANEL_PORT_SELECT_VLV(port);
7081         } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
7082                 switch (port) {
7083                 case PORT_A:
7084                         port_sel = PANEL_PORT_SELECT_DPA;
7085                         break;
7086                 case PORT_C:
7087                         port_sel = PANEL_PORT_SELECT_DPC;
7088                         break;
7089                 case PORT_D:
7090                         port_sel = PANEL_PORT_SELECT_DPD;
7091                         break;
7092                 default:
7093                         MISSING_CASE(port);
7094                         break;
7095                 }
7096         }
7097
7098         pp_on |= port_sel;
7099
7100         intel_de_write(dev_priv, regs.pp_on, pp_on);
7101         intel_de_write(dev_priv, regs.pp_off, pp_off);
7102
7103         /*
7104          * Compute the divisor for the pp clock, simply matching the Bspec formula.
7105          */
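             /*
              * e.g. assuming a 24MHz rawclk, div = 24 and the reference
              * divider field becomes (100 * 24) / 2 - 1 = 1199.
              */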
7106         if (i915_mmio_reg_valid(regs.pp_div)) {
7107                 intel_de_write(dev_priv, regs.pp_div,
7108                                REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
7109         } else {
7110                 u32 pp_ctl;
7111
7112                 pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
7113                 pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
7114                 pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
7115                 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
7116         }
7117
7118         drm_dbg_kms(&dev_priv->drm,
7119                     "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
7120                     intel_de_read(dev_priv, regs.pp_on),
7121                     intel_de_read(dev_priv, regs.pp_off),
7122                     i915_mmio_reg_valid(regs.pp_div) ?
7123                     intel_de_read(dev_priv, regs.pp_div) :
7124                     (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
7125 }
7126
7127 static void intel_dp_pps_init(struct intel_dp *intel_dp)
7128 {
7129         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7130
7131         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
7132                 vlv_initial_power_sequencer_setup(intel_dp);
7133         } else {
7134                 intel_dp_init_panel_power_sequencer(intel_dp);
7135                 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
7136         }
7137 }
7138
7139 /**
7140  * intel_dp_set_drrs_state - program registers for RR switch to take effect
7141  * @dev_priv: i915 device
7142  * @crtc_state: a pointer to the active intel_crtc_state
7143  * @refresh_rate: RR to be programmed
7144  *
7145  * This function gets called when refresh rate (RR) has to be changed from
7146  * one frequency to another. Switches can be between high and low RR
7147  * supported by the panel or to any other RR based on media playback (in
7148  * this case, RR value needs to be passed from user space).
7149  *
7150  * The caller of this function needs to take a lock on dev_priv->drrs.
7151  */
7152 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
7153                                     const struct intel_crtc_state *crtc_state,
7154                                     int refresh_rate)
7155 {
7156         struct intel_dp *intel_dp = dev_priv->drrs.dp;
7157         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
7158         enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
7159
7160         if (refresh_rate <= 0) {
7161                 drm_dbg_kms(&dev_priv->drm,
7162                             "Refresh rate should be a positive non-zero value.\n");
7163                 return;
7164         }
7165
7166         if (!intel_dp) {
7167                 drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
7168                 return;
7169         }
7170
7171         if (!intel_crtc) {
7172                 drm_dbg_kms(&dev_priv->drm,
7173                             "DRRS: intel_crtc not initialized\n");
7174                 return;
7175         }
7176
7177         if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
7178                 drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
7179                 return;
7180         }
7181
7182         if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
7183                         refresh_rate)
7184                 index = DRRS_LOW_RR;
7185
7186         if (index == dev_priv->drrs.refresh_rate_type) {
7187                 drm_dbg_kms(&dev_priv->drm,
7188                             "DRRS requested for previously set RR...ignoring\n");
7189                 return;
7190         }
7191
7192         if (!crtc_state->hw.active) {
7193                 drm_dbg_kms(&dev_priv->drm,
7194                             "eDP encoder disabled. CRTC not active\n");
7195                 return;
7196         }
7197
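             /*
              * Gen8+ (other than CHV) switch the refresh rate by selecting
              * one of the two precomputed link M/N value sets, while older
              * platforms toggle the EDP RR mode switch bit in PIPECONF
              * instead.
              */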
7198         if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
7199                 switch (index) {
7200                 case DRRS_HIGH_RR:
7201                         intel_dp_set_m_n(crtc_state, M1_N1);
7202                         break;
7203                 case DRRS_LOW_RR:
7204                         intel_dp_set_m_n(crtc_state, M2_N2);
7205                         break;
7206                 case DRRS_MAX_RR:
7207                 default:
7208                         drm_err(&dev_priv->drm,
7209                                 "Unsupported refresh rate type\n");
7210                 }
7211         } else if (INTEL_GEN(dev_priv) > 6) {
7212                 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
7213                 u32 val;
7214
7215                 val = intel_de_read(dev_priv, reg);
7216                 if (index > DRRS_HIGH_RR) {
7217                         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
7218                                 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
7219                         else
7220                                 val |= PIPECONF_EDP_RR_MODE_SWITCH;
7221                 } else {
7222                         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
7223                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
7224                         else
7225                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
7226                 }
7227                 intel_de_write(dev_priv, reg, val);
7228         }
7229
7230         dev_priv->drrs.refresh_rate_type = index;
7231
7232         drm_dbg_kms(&dev_priv->drm, "eDP refresh rate set to %dHz\n",
7233                     refresh_rate);
7234 }
7235
7236 /**
7237  * intel_edp_drrs_enable - init drrs struct if supported
7238  * @intel_dp: DP struct
7239  * @crtc_state: A pointer to the active crtc state.
7240  *
7241  * Initializes frontbuffer_bits and drrs.dp
7242  */
7243 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
7244                            const struct intel_crtc_state *crtc_state)
7245 {
7246         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7247
7248         if (!crtc_state->has_drrs) {
7249                 drm_dbg_kms(&dev_priv->drm, "Panel doesn't support DRRS\n");
7250                 return;
7251         }
7252
7253         if (dev_priv->psr.enabled) {
7254                 drm_dbg_kms(&dev_priv->drm,
7255                             "PSR enabled. Not enabling DRRS.\n");
7256                 return;
7257         }
7258
7259         mutex_lock(&dev_priv->drrs.mutex);
7260         if (dev_priv->drrs.dp) {
7261                 drm_dbg_kms(&dev_priv->drm, "DRRS already enabled\n");
7262                 goto unlock;
7263         }
7264
7265         dev_priv->drrs.busy_frontbuffer_bits = 0;
7266
7267         dev_priv->drrs.dp = intel_dp;
7268
7269 unlock:
7270         mutex_unlock(&dev_priv->drrs.mutex);
7271 }
7272
7273 /**
7274  * intel_edp_drrs_disable - Disable DRRS
7275  * @intel_dp: DP struct
7276  * @old_crtc_state: Pointer to old crtc_state.
7277  *
7278  */
7279 void intel_edp_drrs_disable(struct intel_dp *intel_dp,
7280                             const struct intel_crtc_state *old_crtc_state)
7281 {
7282         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7283
7284         if (!old_crtc_state->has_drrs)
7285                 return;
7286
7287         mutex_lock(&dev_priv->drrs.mutex);
7288         if (!dev_priv->drrs.dp) {
7289                 mutex_unlock(&dev_priv->drrs.mutex);
7290                 return;
7291         }
7292
7293         if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
7294                 intel_dp_set_drrs_state(dev_priv, old_crtc_state,
7295                         intel_dp->attached_connector->panel.fixed_mode->vrefresh);
7296
7297         dev_priv->drrs.dp = NULL;
7298         mutex_unlock(&dev_priv->drrs.mutex);
7299
7300         cancel_delayed_work_sync(&dev_priv->drrs.work);
7301 }
7302
7303 static void intel_edp_drrs_downclock_work(struct work_struct *work)
7304 {
7305         struct drm_i915_private *dev_priv =
7306                 container_of(work, typeof(*dev_priv), drrs.work.work);
7307         struct intel_dp *intel_dp;
7308
7309         mutex_lock(&dev_priv->drrs.mutex);
7310
7311         intel_dp = dev_priv->drrs.dp;
7312
7313         if (!intel_dp)
7314                 goto unlock;
7315
7316         /*
7317          * The delayed work can race with an invalidate, hence we need to
7318          * recheck.
7319          */
7320
7321         if (dev_priv->drrs.busy_frontbuffer_bits)
7322                 goto unlock;
7323
7324         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
7325                 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
7326
7327                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
7328                         intel_dp->attached_connector->panel.downclock_mode->vrefresh);
7329         }
7330
7331 unlock:
7332         mutex_unlock(&dev_priv->drrs.mutex);
7333 }
7334
7335 /**
7336  * intel_edp_drrs_invalidate - Disable Idleness DRRS
7337  * @dev_priv: i915 device
7338  * @frontbuffer_bits: frontbuffer plane tracking bits
7339  *
7340  * This function gets called every time rendering on the given planes starts.
7341  * Hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
7342  *
7343  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
7344  */
7345 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
7346                                unsigned int frontbuffer_bits)
7347 {
7348         struct drm_crtc *crtc;
7349         enum pipe pipe;
7350
7351         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
7352                 return;
7353
7354         cancel_delayed_work(&dev_priv->drrs.work);
7355
7356         mutex_lock(&dev_priv->drrs.mutex);
7357         if (!dev_priv->drrs.dp) {
7358                 mutex_unlock(&dev_priv->drrs.mutex);
7359                 return;
7360         }
7361
7362         crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
7363         pipe = to_intel_crtc(crtc)->pipe;
7364
7365         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
7366         dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
7367
7368         /* invalidate means busy screen hence upclock */
7369         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
7370                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
7371                         dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
7372
7373         mutex_unlock(&dev_priv->drrs.mutex);
7374 }
7375
7376 /**
7377  * intel_edp_drrs_flush - Restart Idleness DRRS
7378  * @dev_priv: i915 device
7379  * @frontbuffer_bits: frontbuffer plane tracking bits
7380  *
7381  * This function gets called every time rendering on the given planes has
7382  * completed or a flip on a crtc is completed. So DRRS should be upclocked
7383  * (LOW_RR -> HIGH_RR), and idleness detection should be started again,
7384  * if no other planes are dirty.
7385  *
7386  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
7387  */
7388 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
7389                           unsigned int frontbuffer_bits)
7390 {
7391         struct drm_crtc *crtc;
7392         enum pipe pipe;
7393
7394         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
7395                 return;
7396
7397         cancel_delayed_work(&dev_priv->drrs.work);
7398
7399         mutex_lock(&dev_priv->drrs.mutex);
7400         if (!dev_priv->drrs.dp) {
7401                 mutex_unlock(&dev_priv->drrs.mutex);
7402                 return;
7403         }
7404
7405         crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
7406         pipe = to_intel_crtc(crtc)->pipe;
7407
7408         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
7409         dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
7410
7411         /* flush means busy screen hence upclock */
7412         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
7413                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
7414                                 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
7415
7416         /*
7417          * flush also means no more activity hence schedule downclock, if all
7418          * other fbs are quiescent too
7419          */
7420         if (!dev_priv->drrs.busy_frontbuffer_bits)
7421                 schedule_delayed_work(&dev_priv->drrs.work,
7422                                 msecs_to_jiffies(1000));
7423         mutex_unlock(&dev_priv->drrs.mutex);
7424 }
7425
7426 /**
7427  * DOC: Display Refresh Rate Switching (DRRS)
7428  *
7429  * Display Refresh Rate Switching (DRRS) is a power conservation feature
7430  * which enables switching between low and high refresh rates
7431  * dynamically, based on the usage scenario. This feature is applicable
7432  * for internal panels.
7433  *
7434  * Indication that the panel supports DRRS is given by the panel EDID, which
7435  * would list multiple refresh rates for one resolution.
7436  *
7437  * DRRS is of 2 types - static and seamless.
7438  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
7439  * (may appear as a blink on screen) and is used in dock-undock scenario.
7440  * Seamless DRRS involves changing RR without any visual effect to the user
7441  * and can be used during normal system usage. This is done by programming
7442  * certain registers.
7443  *
7444  * Support for static/seamless DRRS may be indicated in the VBT based on
7445  * inputs from the panel spec.
7446  *
7447  * DRRS saves power by switching to low RR based on usage scenarios.
7448  *
7449  * The implementation is based on frontbuffer tracking implementation.  When
7450  * there is a disturbance on the screen triggered by user activity or a periodic
7451  * system activity, DRRS is disabled (RR is changed to high RR).  When there is
7452  * no movement on screen, after a timeout of 1 second, a switch to low RR is
7453  * made.
7454  *
7455  * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
7456  * and intel_edp_drrs_flush() are called (see the sketch below).
7457  *
7458  * DRRS can be further extended to support other internal panels and also
7459  * the scenario of video playback wherein RR is set based on the rate
7460  * requested by userspace.
7461  */
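
     /*
      * Illustrative call flow (a sketch only; the actual hooks live in the
      * frontbuffer tracking code):
      *
      *   intel_edp_drrs_invalidate(dev_priv, bits) - rendering starts, upclock
      *   intel_edp_drrs_flush(dev_priv, bits)      - rendering done; upclock
      *                                               and schedule the
      *                                               downclock work, which
      *                                               fires after 1s of
      *                                               idleness
      */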
7462
7463 /**
7464  * intel_dp_drrs_init - Init basic DRRS work and mutex.
7465  * @connector: eDP connector
7466  * @fixed_mode: preferred mode of panel
7467  *
7468  * This function is called only once at driver load to initialize basic
7469  * DRRS stuff.
7470  *
7471  * Returns:
7472  * Downclock mode if panel supports it, else return NULL.
7473  * DRRS support is determined by the presence of downclock mode (apart
7474  * from VBT setting).
7475  */
7476 static struct drm_display_mode *
7477 intel_dp_drrs_init(struct intel_connector *connector,
7478                    struct drm_display_mode *fixed_mode)
7479 {
7480         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
7481         struct drm_display_mode *downclock_mode = NULL;
7482
7483         INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
7484         mutex_init(&dev_priv->drrs.mutex);
7485
7486         if (INTEL_GEN(dev_priv) <= 6) {
7487                 drm_dbg_kms(&dev_priv->drm,
7488                             "DRRS only supported for Gen7 and above\n");
7489                 return NULL;
7490         }
7491
7492         if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
7493                 drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
7494                 return NULL;
7495         }
7496
7497         downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
7498         if (!downclock_mode) {
7499                 drm_dbg_kms(&dev_priv->drm,
7500                             "No downclock mode found. DRRS not supported\n");
7501                 return NULL;
7502         }
7503
7504         dev_priv->drrs.type = dev_priv->vbt.drrs_type;
7505
7506         dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
7507         drm_dbg_kms(&dev_priv->drm,
7508                     "seamless DRRS supported for eDP panel.\n");
7509         return downclock_mode;
7510 }
7511
7512 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
7513                                      struct intel_connector *intel_connector)
7514 {
7515         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7516         struct drm_device *dev = &dev_priv->drm;
7517         struct drm_connector *connector = &intel_connector->base;
7518         struct drm_display_mode *fixed_mode = NULL;
7519         struct drm_display_mode *downclock_mode = NULL;
7520         bool has_dpcd;
7521         enum pipe pipe = INVALID_PIPE;
7522         intel_wakeref_t wakeref;
7523         struct edid *edid;
7524
7525         if (!intel_dp_is_edp(intel_dp))
7526                 return true;
7527
7528         INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);
7529
7530         /*
7531          * On IBX/CPT we may get here with LVDS already registered. Since the
7532          * driver uses the only internal power sequencer available for both
7533          * eDP and LVDS, bail out early in this case to prevent interfering
7534          * with an already powered-on LVDS power sequencer.
7535          */
7536         if (intel_get_lvds_encoder(dev_priv)) {
7537                 drm_WARN_ON(dev,
7538                             !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
7539                 drm_info(&dev_priv->drm,
7540                          "LVDS was detected, not registering eDP\n");
7541
7542                 return false;
7543         }
7544
7545         with_pps_lock(intel_dp, wakeref) {
7546                 intel_dp_init_panel_power_timestamps(intel_dp);
7547                 intel_dp_pps_init(intel_dp);
7548                 intel_edp_panel_vdd_sanitize(intel_dp);
7549         }
7550
7551         /* Cache DPCD and EDID for eDP. */
7552         has_dpcd = intel_edp_init_dpcd(intel_dp);
7553
7554         if (!has_dpcd) {
7555                 /* if this fails, presume the device is a ghost */
7556                 drm_info(&dev_priv->drm,
7557                          "failed to retrieve link info, disabling eDP\n");
7558                 goto out_vdd_off;
7559         }
7560
7561         mutex_lock(&dev->mode_config.mutex);
7562         edid = drm_get_edid(connector, &intel_dp->aux.ddc);
7563         if (edid) {
7564                 if (drm_add_edid_modes(connector, edid)) {
7565                         drm_connector_update_edid_property(connector,
7566                                                                 edid);
7567                 } else {
7568                         kfree(edid);
7569                         edid = ERR_PTR(-EINVAL);
7570                 }
7571         } else {
7572                 edid = ERR_PTR(-ENOENT);
7573         }
7574         intel_connector->edid = edid;
7575
7576         fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
7577         if (fixed_mode)
7578                 downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
7579
7580         /* fallback to VBT if available for eDP */
7581         if (!fixed_mode)
7582                 fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
7583         mutex_unlock(&dev->mode_config.mutex);
7584
7585         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
7586                 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
7587                 register_reboot_notifier(&intel_dp->edp_notifier);
7588
7589                 /*
7590                  * Figure out the current pipe for the initial backlight setup.
7591                  * If the current pipe isn't valid, try the PPS pipe, and if that
7592                  * fails just assume pipe A.
7593                  */
7594                 pipe = vlv_active_pipe(intel_dp);
7595
7596                 if (pipe != PIPE_A && pipe != PIPE_B)
7597                         pipe = intel_dp->pps_pipe;
7598
7599                 if (pipe != PIPE_A && pipe != PIPE_B)
7600                         pipe = PIPE_A;
7601
7602                 drm_dbg_kms(&dev_priv->drm,
7603                             "using pipe %c for initial backlight setup\n",
7604                             pipe_name(pipe));
7605         }
7606
7607         intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
7608         intel_connector->panel.backlight.power = intel_edp_backlight_power;
7609         intel_panel_setup_backlight(connector, pipe);
7610
7611         if (fixed_mode) {
7612                 /* We do not know the orientation, but there might be a quirk */
7613                 drm_connector_set_panel_orientation_with_quirk(connector,
7614                                 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
7615                                 fixed_mode->hdisplay, fixed_mode->vdisplay);
7616         }
7617
7618         return true;
7619
7620 out_vdd_off:
7621         cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
7622         /*
7623          * vdd might still be enabled due to the delayed vdd off.
7624          * Make sure vdd is actually turned off here.
7625          */
7626         with_pps_lock(intel_dp, wakeref)
7627                 edp_panel_vdd_off_sync(intel_dp);
7628
7629         return false;
7630 }
7631
7632 static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
7633 {
7634         struct intel_connector *intel_connector;
7635         struct drm_connector *connector;
7636
7637         intel_connector = container_of(work, typeof(*intel_connector),
7638                                        modeset_retry_work);
7639         connector = &intel_connector->base;
7640         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
7641                       connector->name);
7642
7643         /* Grab the locks before changing the connector property */
7644         mutex_lock(&connector->dev->mode_config.mutex);
7645         /* Set connector link status to BAD and send a Uevent to notify
7646          * userspace to do a modeset.
7647          */
7648         drm_connector_set_link_status_property(connector,
7649                                                DRM_MODE_LINK_STATUS_BAD);
7650         mutex_unlock(&connector->dev->mode_config.mutex);
7651         /* Send Hotplug uevent so userspace can reprobe */
7652         drm_kms_helper_hotplug_event(connector->dev);
7653 }
7654
7655 bool
7656 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
7657                         struct intel_connector *intel_connector)
7658 {
7659         struct drm_connector *connector = &intel_connector->base;
7660         struct intel_dp *intel_dp = &intel_dig_port->dp;
7661         struct intel_encoder *intel_encoder = &intel_dig_port->base;
7662         struct drm_device *dev = intel_encoder->base.dev;
7663         struct drm_i915_private *dev_priv = to_i915(dev);
7664         enum port port = intel_encoder->port;
7665         enum phy phy = intel_port_to_phy(dev_priv, port);
7666         int type;
7667
7668         /* Initialize the work for modeset in case of link train failure */
7669         INIT_WORK(&intel_connector->modeset_retry_work,
7670                   intel_dp_modeset_retry_work_fn);
7671
7672         if (drm_WARN(dev, intel_dig_port->max_lanes < 1,
7673                      "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
7674                      intel_dig_port->max_lanes, intel_encoder->base.base.id,
7675                      intel_encoder->base.name))
7676                 return false;
7677
7678         intel_dp_set_source_rates(intel_dp);
7679
7680         intel_dp->reset_link_params = true;
7681         intel_dp->pps_pipe = INVALID_PIPE;
7682         intel_dp->active_pipe = INVALID_PIPE;
7683
7684         /* Preserve the current hw state. */
7685         intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
7686         intel_dp->attached_connector = intel_connector;
7687
7688         if (intel_dp_is_port_edp(dev_priv, port)) {
7689                 /*
7690                  * Currently we don't support eDP on TypeC ports, although in
7691                  * theory it could work on TypeC legacy ports.
7692                  */
7693                 drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
7694                 type = DRM_MODE_CONNECTOR_eDP;
7695         } else {
7696                 type = DRM_MODE_CONNECTOR_DisplayPort;
7697         }
7698
7699         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
7700                 intel_dp->active_pipe = vlv_active_pipe(intel_dp);
7701
7702         /*
7703          * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
7704          * for DP the encoder type can be set by the caller to
7705          * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
7706          */
7707         if (type == DRM_MODE_CONNECTOR_eDP)
7708                 intel_encoder->type = INTEL_OUTPUT_EDP;
7709
7710         /* eDP only on port B and/or C on vlv/chv */
7711         if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
7712                               IS_CHERRYVIEW(dev_priv)) &&
7713                         intel_dp_is_edp(intel_dp) &&
7714                         port != PORT_B && port != PORT_C))
7715                 return false;
7716
7717         drm_dbg_kms(&dev_priv->drm,
7718                     "Adding %s connector on [ENCODER:%d:%s]\n",
7719                     type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
7720                     intel_encoder->base.base.id, intel_encoder->base.name);
7721
7722         drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
7723         drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
7724
7725         if (!HAS_GMCH(dev_priv))
7726                 connector->interlace_allowed = true;
7727         connector->doublescan_allowed = 0;
7728
7729         if (INTEL_GEN(dev_priv) >= 11)
7730                 connector->ycbcr_420_allowed = true;
7731
7732         intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
7733         intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
7734
7735         intel_dp_aux_init(intel_dp);
7736
7737         intel_connector_attach_encoder(intel_connector, intel_encoder);
7738
7739         if (HAS_DDI(dev_priv))
7740                 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
7741         else
7742                 intel_connector->get_hw_state = intel_connector_get_hw_state;
7743
7744         /* init MST on ports that can support it */
7745         intel_dp_mst_encoder_init(intel_dig_port,
7746                                   intel_connector->base.base.id);
7747
7748         if (!intel_edp_init_connector(intel_dp, intel_connector)) {
7749                 intel_dp_aux_fini(intel_dp);
7750                 intel_dp_mst_encoder_cleanup(intel_dig_port);
7751                 goto fail;
7752         }
7753
7754         intel_dp_add_properties(intel_dp, connector);
7755
7756         if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
7757                 int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
7758                 if (ret)
7759                         drm_dbg_kms(&dev_priv->drm,
7760                                     "HDCP init failed, skipping.\n");
7761         }
7762
7763         /* For the G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be
7764          * written with 0xd. Failure to do so will result in spurious
7765          * interrupts being generated on the port when a cable is not attached.
7766          */
7767         if (IS_G45(dev_priv)) {
7768                 u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
7769                 intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
7770                                (temp & ~0xf) | 0xd);
7771         }
7772
7773         return true;
7774
7775 fail:
7776         drm_connector_cleanup(connector);
7777
7778         return false;
7779 }
7780
7781 bool intel_dp_init(struct drm_i915_private *dev_priv,
7782                    i915_reg_t output_reg,
7783                    enum port port)
7784 {
7785         struct intel_digital_port *intel_dig_port;
7786         struct intel_encoder *intel_encoder;
7787         struct drm_encoder *encoder;
7788         struct intel_connector *intel_connector;
7789
7790         intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
7791         if (!intel_dig_port)
7792                 return false;
7793
7794         intel_connector = intel_connector_alloc();
7795         if (!intel_connector)
7796                 goto err_connector_alloc;
7797
7798         intel_encoder = &intel_dig_port->base;
7799         encoder = &intel_encoder->base;
7800
7801         if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
7802                              &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
7803                              "DP %c", port_name(port)))
7804                 goto err_encoder_init;
7805
7806         intel_encoder->hotplug = intel_dp_hotplug;
7807         intel_encoder->compute_config = intel_dp_compute_config;
7808         intel_encoder->get_hw_state = intel_dp_get_hw_state;
7809         intel_encoder->get_config = intel_dp_get_config;
7810         intel_encoder->update_pipe = intel_panel_update_backlight;
7811         intel_encoder->suspend = intel_dp_encoder_suspend;
7812         if (IS_CHERRYVIEW(dev_priv)) {
7813                 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
7814                 intel_encoder->pre_enable = chv_pre_enable_dp;
7815                 intel_encoder->enable = vlv_enable_dp;
7816                 intel_encoder->disable = vlv_disable_dp;
7817                 intel_encoder->post_disable = chv_post_disable_dp;
7818                 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
7819         } else if (IS_VALLEYVIEW(dev_priv)) {
7820                 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
7821                 intel_encoder->pre_enable = vlv_pre_enable_dp;
7822                 intel_encoder->enable = vlv_enable_dp;
7823                 intel_encoder->disable = vlv_disable_dp;
7824                 intel_encoder->post_disable = vlv_post_disable_dp;
7825         } else {
7826                 intel_encoder->pre_enable = g4x_pre_enable_dp;
7827                 intel_encoder->enable = g4x_enable_dp;
7828                 intel_encoder->disable = g4x_disable_dp;
7829                 intel_encoder->post_disable = g4x_post_disable_dp;
7830         }
7831
7832         intel_dig_port->dp.output_reg = output_reg;
7833         intel_dig_port->max_lanes = 4;
7834
7835         intel_encoder->type = INTEL_OUTPUT_DP;
7836         intel_encoder->power_domain = intel_port_to_power_domain(port);
7837         if (IS_CHERRYVIEW(dev_priv)) {
7838                 if (port == PORT_D)
7839                         intel_encoder->pipe_mask = BIT(PIPE_C);
7840                 else
7841                         intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
7842         } else {
7843                 intel_encoder->pipe_mask = ~0;
7844         }
7845         intel_encoder->cloneable = 0;
7846         intel_encoder->port = port;
7847
7848         intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
7849
7850         if (port != PORT_A)
7851                 intel_infoframe_init(intel_dig_port);
7852
7853         intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
7854         if (!intel_dp_init_connector(intel_dig_port, intel_connector))
7855                 goto err_init_connector;
7856
7857         return true;
7858
7859 err_init_connector:
7860         drm_encoder_cleanup(encoder);
7861 err_encoder_init:
7862         kfree(intel_connector);
7863 err_connector_alloc:
7864         kfree(intel_dig_port);
7865         return false;
7866 }
7867
7868 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
7869 {
7870         struct intel_encoder *encoder;
7871
7872         for_each_intel_encoder(&dev_priv->drm, encoder) {
7873                 struct intel_dp *intel_dp;
7874
7875                 if (encoder->type != INTEL_OUTPUT_DDI)
7876                         continue;
7877
7878                 intel_dp = enc_to_intel_dp(encoder);
7879
7880                 if (!intel_dp->can_mst)
7881                         continue;
7882
7883                 if (intel_dp->is_mst)
7884                         drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
7885         }
7886 }
7887
7888 void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
7889 {
7890         struct intel_encoder *encoder;
7891
7892         for_each_intel_encoder(&dev_priv->drm, encoder) {
7893                 struct intel_dp *intel_dp;
7894                 int ret;
7895
7896                 if (encoder->type != INTEL_OUTPUT_DDI)
7897                         continue;
7898
7899                 intel_dp = enc_to_intel_dp(encoder);
7900
7901                 if (!intel_dp->can_mst)
7902                         continue;
7903
7904                 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
7905                                                      true);
7906                 if (ret) {
7907                         intel_dp->is_mst = false;
7908                         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
7909                                                         false);
7910                 }
7911         }
7912 }