drivers/gpu/drm/i915/display/intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/export.h>
29 #include <linux/i2c.h>
30 #include <linux/notifier.h>
31 #include <linux/slab.h>
32 #include <linux/types.h>
33
34 #include <asm/byteorder.h>
35
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_crtc.h>
38 #include <drm/drm_dp_helper.h>
39 #include <drm/drm_edid.h>
40 #include <drm/drm_probe_helper.h>
41
42 #include "i915_debugfs.h"
43 #include "i915_drv.h"
44 #include "intel_atomic.h"
45 #include "intel_audio.h"
46 #include "intel_connector.h"
47 #include "intel_ddi.h"
48 #include "intel_display_types.h"
49 #include "intel_dp.h"
50 #include "intel_dp_aux.h"
51 #include "intel_dp_link_training.h"
52 #include "intel_dp_mst.h"
53 #include "intel_dpio_phy.h"
54 #include "intel_fifo_underrun.h"
55 #include "intel_hdcp.h"
56 #include "intel_hdmi.h"
57 #include "intel_hotplug.h"
58 #include "intel_lspcon.h"
59 #include "intel_lvds.h"
60 #include "intel_panel.h"
61 #include "intel_pps.h"
62 #include "intel_psr.h"
63 #include "intel_sideband.h"
64 #include "intel_tc.h"
65 #include "intel_vdsc.h"
66 #include "intel_vrr.h"
67
68 #define DP_DPRX_ESI_LEN 14
69
70 /* DP DSC throughput values used for slice count calculations, in KPixels/s */
71 #define DP_DSC_PEAK_PIXEL_RATE                  2720000
72 #define DP_DSC_MAX_ENC_THROUGHPUT_0             340000
73 #define DP_DSC_MAX_ENC_THROUGHPUT_1             400000
74
75 /* DP DSC FEC Overhead factor = 1/(0.972261) */
76 #define DP_DSC_FEC_OVERHEAD_FACTOR              972261
77
78 /* Compliance test status bits  */
79 #define INTEL_DP_RESOLUTION_SHIFT_MASK  0
80 #define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
81 #define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
82 #define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
83
84 struct dp_link_dpll {
85         int clock;
86         struct dpll dpll;
87 };
88
89 static const struct dp_link_dpll g4x_dpll[] = {
90         { 162000,
91                 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
92         { 270000,
93                 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
94 };
95
96 static const struct dp_link_dpll pch_dpll[] = {
97         { 162000,
98                 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
99         { 270000,
100                 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
101 };
102
103 static const struct dp_link_dpll vlv_dpll[] = {
104         { 162000,
105                 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
106         { 270000,
107                 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
108 };
109
110 /*
111  * CHV supports eDP 1.4, which allows additional link rates.
112  * Only the fixed rates are listed below; variable rates are excluded.
113  */
114 static const struct dp_link_dpll chv_dpll[] = {
115         /*
116          * CHV requires programming a fractional divider for m2.
117          * m2 is stored in fixed-point format using the formula below:
118          * (m2_int << 22) | m2_fraction
119          */
120         { 162000,       /* m2_int = 32, m2_fraction = 1677722 */
121                 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
122         { 270000,       /* m2_int = 27, m2_fraction = 0 */
123                 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
124 };
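/*
 * Example: the 162000 entry above encodes m2 as
 * (32 << 22) | 1677722 = 0x819999a, i.e. m2 ~= 32.4.
 */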
125
126 const struct dpll *vlv_get_dpll(struct drm_i915_private *i915)
127 {
128         return IS_CHERRYVIEW(i915) ? &chv_dpll[0].dpll : &vlv_dpll[0].dpll;
129 }
130
131 /* Constants for DP DSC configurations */
132 static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
133
134 /* With a single pipe configuration, the HW is capable of supporting a
135  * maximum of 4 slices per line.
136  */
137 static const u8 valid_dsc_slicecount[] = {1, 2, 4};
138
139 /**
140  * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
141  * @intel_dp: DP struct
142  *
143  * If a CPU or PCH DP output is attached to an eDP panel, this function
144  * will return true, and false otherwise.
145  */
146 bool intel_dp_is_edp(struct intel_dp *intel_dp)
147 {
148         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
149
150         return dig_port->base.type == INTEL_OUTPUT_EDP;
151 }
152
153 static void intel_dp_link_down(struct intel_encoder *encoder,
154                                const struct intel_crtc_state *old_crtc_state);
155 static void intel_dp_unset_edid(struct intel_dp *intel_dp);
156
157 /* update sink rates from dpcd */
158 static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
159 {
160         static const int dp_rates[] = {
161                 162000, 270000, 540000, 810000
162         };
163         int i, max_rate;
164         int max_lttpr_rate;
165
166         if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
167                 /* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
168                 static const int quirk_rates[] = { 162000, 270000, 324000 };
169
170                 memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
171                 intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);
172
173                 return;
174         }
175
176         max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
177         max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
178         if (max_lttpr_rate)
179                 max_rate = min(max_rate, max_lttpr_rate);
180
181         for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
182                 if (dp_rates[i] > max_rate)
183                         break;
184                 intel_dp->sink_rates[i] = dp_rates[i];
185         }
186
187         intel_dp->num_sink_rates = i;
188 }
189
190 /* Get length of rates array potentially limited by max_rate. */
191 static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
192 {
193         int i;
194
195         /* Limit results by potentially reduced max rate */
196         for (i = 0; i < len; i++) {
197                 if (rates[len - i - 1] <= max_rate)
198                         return len - i;
199         }
200
201         return 0;
202 }
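/*
 * Example: with ascending rates {162000, 270000, 540000} and
 * max_rate = 300000, the scan from the end finds 270000 <= 300000
 * and returns a limited length of 2.
 */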
203
204 /* Get length of common rates array potentially limited by max_rate. */
205 static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
206                                           int max_rate)
207 {
208         return intel_dp_rate_limit_len(intel_dp->common_rates,
209                                        intel_dp->num_common_rates, max_rate);
210 }
211
212 /* Theoretical max between source and sink */
213 static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
214 {
215         return intel_dp->common_rates[intel_dp->num_common_rates - 1];
216 }
217
218 /* Theoretical max between source and sink */
219 static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
220 {
221         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
222         int source_max = dig_port->max_lanes;
223         int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
224         int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
225         int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);
226
227         if (lttpr_max)
228                 sink_max = min(sink_max, lttpr_max);
229
230         return min3(source_max, sink_max, fia_max);
231 }
232
233 int intel_dp_max_lane_count(struct intel_dp *intel_dp)
234 {
235         return intel_dp->max_link_lane_count;
236 }
237
238 int
239 intel_dp_link_required(int pixel_clock, int bpp)
240 {
241         /* pixel_clock is in kHz; divide bpp by 8 to convert bits to bytes */
242         return DIV_ROUND_UP(pixel_clock * bpp, 8);
243 }
244
245 int
246 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
247 {
248         /* max_link_clock is the link symbol clock (LS_Clk) in kHz, not the
249          * link rate that is generally expressed in Gbps. Since 8 bits of data
250          * are transmitted every LS_Clk per lane, there is no need to account
251          * here for the channel encoding done in the PHY layer.
252          */
253
254         return max_link_clock * max_lanes;
255 }
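/*
 * Example: a 1080p60 mode (148500 kHz) at 24 bpp needs
 * intel_dp_link_required(148500, 24) = 445500, which fits within
 * intel_dp_max_data_rate(270000, 4) = 1080000 (HBR x4).
 */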
256
257 bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
258 {
259         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
260         struct intel_encoder *encoder = &intel_dig_port->base;
261         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
262
263         return INTEL_GEN(dev_priv) >= 12 ||
264                 (INTEL_GEN(dev_priv) == 11 &&
265                  encoder->port != PORT_A);
266 }
267
268 static int cnl_max_source_rate(struct intel_dp *intel_dp)
269 {
270         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
271         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
272         enum port port = dig_port->base.port;
273
274         u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
275
276         /* Low voltage SKUs are limited to max of 5.4G */
277         if (voltage == VOLTAGE_INFO_0_85V)
278                 return 540000;
279
280         /* For this SKU 8.1G is supported in all ports */
281         if (IS_CNL_WITH_PORT_F(dev_priv))
282                 return 810000;
283
284         /* For other SKUs, max rate on ports A and D is 5.4G */
285         if (port == PORT_A || port == PORT_D)
286                 return 540000;
287
288         return 810000;
289 }
290
291 static int icl_max_source_rate(struct intel_dp *intel_dp)
292 {
293         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
294         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
295         enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
296
297         if (intel_phy_is_combo(dev_priv, phy) &&
298             !intel_dp_is_edp(intel_dp))
299                 return 540000;
300
301         return 810000;
302 }
303
304 static int ehl_max_source_rate(struct intel_dp *intel_dp)
305 {
306         if (intel_dp_is_edp(intel_dp))
307                 return 540000;
308
309         return 810000;
310 }
311
312 static void
313 intel_dp_set_source_rates(struct intel_dp *intel_dp)
314 {
315         /* The values must be in increasing order */
316         static const int cnl_rates[] = {
317                 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
318         };
319         static const int bxt_rates[] = {
320                 162000, 216000, 243000, 270000, 324000, 432000, 540000
321         };
322         static const int skl_rates[] = {
323                 162000, 216000, 270000, 324000, 432000, 540000
324         };
325         static const int hsw_rates[] = {
326                 162000, 270000, 540000
327         };
328         static const int g4x_rates[] = {
329                 162000, 270000
330         };
331         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
332         struct intel_encoder *encoder = &dig_port->base;
333         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
334         const int *source_rates;
335         int size, max_rate = 0, vbt_max_rate;
336
337         /* This should only be done once */
338         drm_WARN_ON(&dev_priv->drm,
339                     intel_dp->source_rates || intel_dp->num_source_rates);
340
341         if (INTEL_GEN(dev_priv) >= 10) {
342                 source_rates = cnl_rates;
343                 size = ARRAY_SIZE(cnl_rates);
344                 if (IS_GEN(dev_priv, 10))
345                         max_rate = cnl_max_source_rate(intel_dp);
346                 else if (IS_JSL_EHL(dev_priv))
347                         max_rate = ehl_max_source_rate(intel_dp);
348                 else
349                         max_rate = icl_max_source_rate(intel_dp);
350         } else if (IS_GEN9_LP(dev_priv)) {
351                 source_rates = bxt_rates;
352                 size = ARRAY_SIZE(bxt_rates);
353         } else if (IS_GEN9_BC(dev_priv)) {
354                 source_rates = skl_rates;
355                 size = ARRAY_SIZE(skl_rates);
356         } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
357                    IS_BROADWELL(dev_priv)) {
358                 source_rates = hsw_rates;
359                 size = ARRAY_SIZE(hsw_rates);
360         } else {
361                 source_rates = g4x_rates;
362                 size = ARRAY_SIZE(g4x_rates);
363         }
364
365         vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
366         if (max_rate && vbt_max_rate)
367                 max_rate = min(max_rate, vbt_max_rate);
368         else if (vbt_max_rate)
369                 max_rate = vbt_max_rate;
370
371         if (max_rate)
372                 size = intel_dp_rate_limit_len(source_rates, size, max_rate);
373
374         intel_dp->source_rates = source_rates;
375         intel_dp->num_source_rates = size;
376 }
377
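/*
 * Walk two ascending rate arrays in lockstep and copy the rates present
 * in both into common_rates, returning the number of matches (capped at
 * DP_MAX_SUPPORTED_RATES).
 */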
378 static int intersect_rates(const int *source_rates, int source_len,
379                            const int *sink_rates, int sink_len,
380                            int *common_rates)
381 {
382         int i = 0, j = 0, k = 0;
383
384         while (i < source_len && j < sink_len) {
385                 if (source_rates[i] == sink_rates[j]) {
386                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
387                                 return k;
388                         common_rates[k] = source_rates[i];
389                         ++k;
390                         ++i;
391                         ++j;
392                 } else if (source_rates[i] < sink_rates[j]) {
393                         ++i;
394                 } else {
395                         ++j;
396                 }
397         }
398         return k;
399 }
400
401 /* return index of rate in rates array, or -1 if not found */
402 static int intel_dp_rate_index(const int *rates, int len, int rate)
403 {
404         int i;
405
406         for (i = 0; i < len; i++)
407                 if (rate == rates[i])
408                         return i;
409
410         return -1;
411 }
412
413 static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
414 {
415         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
416
417         drm_WARN_ON(&i915->drm,
418                     !intel_dp->num_source_rates || !intel_dp->num_sink_rates);
419
420         intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
421                                                      intel_dp->num_source_rates,
422                                                      intel_dp->sink_rates,
423                                                      intel_dp->num_sink_rates,
424                                                      intel_dp->common_rates);
425
426         /* Paranoia, there should always be something in common. */
427         if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
428                 intel_dp->common_rates[0] = 162000;
429                 intel_dp->num_common_rates = 1;
430         }
431 }
432
433 static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
434                                        u8 lane_count)
435 {
436         /*
437          * FIXME: we need to synchronize the current link parameters with
438          * hardware readout. Currently fast link training doesn't work on
439          * boot-up.
440          */
441         if (link_rate == 0 ||
442             link_rate > intel_dp->max_link_rate)
443                 return false;
444
445         if (lane_count == 0 ||
446             lane_count > intel_dp_max_lane_count(intel_dp))
447                 return false;
448
449         return true;
450 }
451
452 static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
453                                                      int link_rate,
454                                                      u8 lane_count)
455 {
456         const struct drm_display_mode *fixed_mode =
457                 intel_dp->attached_connector->panel.fixed_mode;
458         int mode_rate, max_rate;
459
460         mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
461         max_rate = intel_dp_max_data_rate(link_rate, lane_count);
462         if (mode_rate > max_rate)
463                 return false;
464
465         return true;
466 }
467
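/*
 * Called after a failed link training attempt: eDP is first retried with
 * the panel's maximum parameters, then we fall back to the next lower
 * common link rate, and finally halve the lane count. Returns 0 if
 * another attempt should be made, -1 if there is nothing left to try.
 */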
468 int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
469                                             int link_rate, u8 lane_count)
470 {
471         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
472         int index;
473
474         /*
475          * TODO: Enable fallback on MST links once MST link compute can handle
476          * the fallback params.
477          */
478         if (intel_dp->is_mst) {
479                 drm_err(&i915->drm, "Link Training Unsuccessful\n");
480                 return -1;
481         }
482
483         if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) {
484                 drm_dbg_kms(&i915->drm,
485                             "Retrying Link training for eDP with max parameters\n");
486                 intel_dp->use_max_params = true;
487                 return 0;
488         }
489
490         index = intel_dp_rate_index(intel_dp->common_rates,
491                                     intel_dp->num_common_rates,
492                                     link_rate);
493         if (index > 0) {
494                 if (intel_dp_is_edp(intel_dp) &&
495                     !intel_dp_can_link_train_fallback_for_edp(intel_dp,
496                                                               intel_dp->common_rates[index - 1],
497                                                               lane_count)) {
498                         drm_dbg_kms(&i915->drm,
499                                     "Retrying Link training for eDP with same parameters\n");
500                         return 0;
501                 }
502                 intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
503                 intel_dp->max_link_lane_count = lane_count;
504         } else if (lane_count > 1) {
505                 if (intel_dp_is_edp(intel_dp) &&
506                     !intel_dp_can_link_train_fallback_for_edp(intel_dp,
507                                                               intel_dp_max_common_rate(intel_dp),
508                                                               lane_count >> 1)) {
509                         drm_dbg_kms(&i915->drm,
510                                     "Retrying Link training for eDP with same parameters\n");
511                         return 0;
512                 }
513                 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
514                 intel_dp->max_link_lane_count = lane_count >> 1;
515         } else {
516                 drm_err(&i915->drm, "Link Training Unsuccessful\n");
517                 return -1;
518         }
519
520         return 0;
521 }
522
523 u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
524 {
525         return div_u64(mul_u32_u32(mode_clock, 1000000U),
526                        DP_DSC_FEC_OVERHEAD_FACTOR);
527 }
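/*
 * Example: a 148500 kHz mode clock becomes
 * 148500 * 1000000 / 972261 ~= 152736 kHz, i.e. roughly 2.8% of extra
 * FEC overhead on top of the uncompressed rate.
 */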
528
529 static int
530 small_joiner_ram_size_bits(struct drm_i915_private *i915)
531 {
532         if (INTEL_GEN(i915) >= 11)
533                 return 7680 * 8;
534         else
535                 return 6144 * 8;
536 }
537
538 static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
539                                        u32 link_clock, u32 lane_count,
540                                        u32 mode_clock, u32 mode_hdisplay,
541                                        bool bigjoiner)
542 {
543         u32 bits_per_pixel, max_bpp_small_joiner_ram;
544         int i;
545
546         /*
547          * Available Link Bandwidth (Kbits/sec) = (NumberOfLanes) *
548          * (LinkSymbolClock) * 8 * (TimeSlotsPerMTP)
549          * for SST -> TimeSlotsPerMTP is 1,
550          * for MST -> TimeSlotsPerMTP has to be calculated
551          */
552         bits_per_pixel = (link_clock * lane_count * 8) /
553                          intel_dp_mode_to_fec_clock(mode_clock);
554         drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);
555
556         /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
557         max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
558                 mode_hdisplay;
559
560         if (bigjoiner)
561                 max_bpp_small_joiner_ram *= 2;
562
563         drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
564                     max_bpp_small_joiner_ram);
565
566         /*
567          * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
568          * check, output bpp from small joiner RAM check)
569          */
570         bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
571
572         if (bigjoiner) {
573                 u32 max_bpp_bigjoiner =
574                         i915->max_cdclk_freq * 48 /
575                         intel_dp_mode_to_fec_clock(mode_clock);
576
577                 DRM_DEBUG_KMS("Max big joiner bpp: %u\n", max_bpp_bigjoiner);
578                 bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
579         }
580
581         /* Error out if the max bpp is less than smallest allowed valid bpp */
582         if (bits_per_pixel < valid_dsc_bpp[0]) {
583                 drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
584                             bits_per_pixel, valid_dsc_bpp[0]);
585                 return 0;
586         }
587
588         /* Find the nearest match in the array of known BPPs from VESA */
589         for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
590                 if (bits_per_pixel < valid_dsc_bpp[i + 1])
591                         break;
592         }
593         bits_per_pixel = valid_dsc_bpp[i];
594
595         /*
596          * Compressed BPP is returned in U6.4 format, so multiply by 16.
597          * For Gen 11, the fractional part is 0.
598          */
599         return bits_per_pixel << 4;
600 }
601
602 static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
603                                        int mode_clock, int mode_hdisplay,
604                                        bool bigjoiner)
605 {
606         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
607         u8 min_slice_count, i;
608         int max_slice_width;
609
610         if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
611                 min_slice_count = DIV_ROUND_UP(mode_clock,
612                                                DP_DSC_MAX_ENC_THROUGHPUT_0);
613         else
614                 min_slice_count = DIV_ROUND_UP(mode_clock,
615                                                DP_DSC_MAX_ENC_THROUGHPUT_1);
616
617         max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
618         if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
619                 drm_dbg_kms(&i915->drm,
620                             "Unsupported slice width %d by DP DSC Sink device\n",
621                             max_slice_width);
622                 return 0;
623         }
624         /* Also take into account max slice width */
625         min_slice_count = max_t(u8, min_slice_count,
626                                 DIV_ROUND_UP(mode_hdisplay,
627                                              max_slice_width));
628
629         /* Find the closest match to the valid slice count values */
630         for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
631                 u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner;
632
633                 if (test_slice_count >
634                     drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, false))
635                         break;
636
637                 /* big joiner needs small joiner to be enabled */
638                 if (bigjoiner && test_slice_count < 4)
639                         continue;
640
641                 if (min_slice_count <= test_slice_count)
642                         return test_slice_count;
643         }
644
645         drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
646                     min_slice_count);
647         return 0;
648 }
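/*
 * Example: a 533250 kHz (4k60) mode is below DP_DSC_PEAK_PIXEL_RATE, so
 * min_slice_count = DIV_ROUND_UP(533250, 340000) = 2; assuming a sink
 * max slice width of 2560, a 3840 pixel wide mode also needs
 * DIV_ROUND_UP(3840, 2560) = 2 slices, so 2 is returned if the sink
 * supports that slice count.
 */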
649
650 static enum intel_output_format
651 intel_dp_output_format(struct drm_connector *connector,
652                        const struct drm_display_mode *mode)
653 {
654         struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
655         const struct drm_display_info *info = &connector->display_info;
656
657         if (!connector->ycbcr_420_allowed ||
658             !drm_mode_is_420_only(info, mode))
659                 return INTEL_OUTPUT_FORMAT_RGB;
660
661         if (intel_dp->dfp.rgb_to_ycbcr &&
662             intel_dp->dfp.ycbcr_444_to_420)
663                 return INTEL_OUTPUT_FORMAT_RGB;
664
665         if (intel_dp->dfp.ycbcr_444_to_420)
666                 return INTEL_OUTPUT_FORMAT_YCBCR444;
667         else
668                 return INTEL_OUTPUT_FORMAT_YCBCR420;
669 }
670
671 int intel_dp_min_bpp(enum intel_output_format output_format)
672 {
673         if (output_format == INTEL_OUTPUT_FORMAT_RGB)
674                 return 6 * 3;
675         else
676                 return 8 * 3;
677 }
678
679 static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
680 {
681         /*
682          * The bpp value assumes RGB output. For the YCbCr 4:2:0 output
683          * format, the number of bytes per pixel is half that of an
684          * RGB pixel.
685          */
686         if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
687                 bpp /= 2;
688
689         return bpp;
690 }
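/* Example: a 24 bpp RGB value maps to an effective 12 bpp for 4:2:0 output. */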
691
692 static int
693 intel_dp_mode_min_output_bpp(struct drm_connector *connector,
694                              const struct drm_display_mode *mode)
695 {
696         enum intel_output_format output_format =
697                 intel_dp_output_format(connector, mode);
698
699         return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
700 }
701
702 static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
703                                   int hdisplay)
704 {
705         /*
706          * Older platforms don't like hdisplay==4096 with DP.
707          *
708          * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
709          * and frame counter increment), but we don't get vblank interrupts,
710          * and the pipe underruns immediately. The link also doesn't seem
711          * to get trained properly.
712          *
713          * On CHV the vblank interrupts don't seem to disappear but
714          * otherwise the symptoms are similar.
715          *
716          * TODO: confirm the behaviour on HSW+
717          */
718         return hdisplay == 4096 && !HAS_DDI(dev_priv);
719 }
720
721 static enum drm_mode_status
722 intel_dp_mode_valid_downstream(struct intel_connector *connector,
723                                const struct drm_display_mode *mode,
724                                int target_clock)
725 {
726         struct intel_dp *intel_dp = intel_attached_dp(connector);
727         const struct drm_display_info *info = &connector->base.display_info;
728         int tmds_clock;
729
730         /* If PCON supports FRL MODE, check FRL bandwidth constraints */
731         if (intel_dp->dfp.pcon_max_frl_bw) {
732                 int target_bw;
733                 int max_frl_bw;
734                 int bpp = intel_dp_mode_min_output_bpp(&connector->base, mode);
735
736                 target_bw = bpp * target_clock;
737
738                 max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
739
740                 /* convert the FRL bandwidth from Gbps to Kbps */
741                 max_frl_bw = max_frl_bw * 1000000;
742
743                 if (target_bw > max_frl_bw)
744                         return MODE_CLOCK_HIGH;
745
746                 return MODE_OK;
747         }
748
749         if (intel_dp->dfp.max_dotclock &&
750             target_clock > intel_dp->dfp.max_dotclock)
751                 return MODE_CLOCK_HIGH;
752
753         /* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
754         tmds_clock = target_clock;
755         if (drm_mode_is_420_only(info, mode))
756                 tmds_clock /= 2;
757
758         if (intel_dp->dfp.min_tmds_clock &&
759             tmds_clock < intel_dp->dfp.min_tmds_clock)
760                 return MODE_CLOCK_LOW;
761         if (intel_dp->dfp.max_tmds_clock &&
762             tmds_clock > intel_dp->dfp.max_tmds_clock)
763                 return MODE_CLOCK_HIGH;
764
765         return MODE_OK;
766 }
767
768 static enum drm_mode_status
769 intel_dp_mode_valid(struct drm_connector *connector,
770                     struct drm_display_mode *mode)
771 {
772         struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
773         struct intel_connector *intel_connector = to_intel_connector(connector);
774         struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
775         struct drm_i915_private *dev_priv = to_i915(connector->dev);
776         int target_clock = mode->clock;
777         int max_rate, mode_rate, max_lanes, max_link_clock;
778         int max_dotclk = dev_priv->max_dotclk_freq;
779         u16 dsc_max_output_bpp = 0;
780         u8 dsc_slice_count = 0;
781         enum drm_mode_status status;
782         bool dsc = false, bigjoiner = false;
783
784         if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
785                 return MODE_NO_DBLESCAN;
786
787         if (mode->flags & DRM_MODE_FLAG_DBLCLK)
788                 return MODE_H_ILLEGAL;
789
790         if (intel_dp_is_edp(intel_dp) && fixed_mode) {
791                 if (mode->hdisplay > fixed_mode->hdisplay)
792                         return MODE_PANEL;
793
794                 if (mode->vdisplay > fixed_mode->vdisplay)
795                         return MODE_PANEL;
796
797                 target_clock = fixed_mode->clock;
798         }
799
800         if (mode->clock < 10000)
801                 return MODE_CLOCK_LOW;
802
803         if ((target_clock > max_dotclk || mode->hdisplay > 5120) &&
804             intel_dp_can_bigjoiner(intel_dp)) {
805                 bigjoiner = true;
806                 max_dotclk *= 2;
807         }
808         if (target_clock > max_dotclk)
809                 return MODE_CLOCK_HIGH;
810
811         max_link_clock = intel_dp_max_link_rate(intel_dp);
812         max_lanes = intel_dp_max_lane_count(intel_dp);
813
814         max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
815         mode_rate = intel_dp_link_required(target_clock,
816                                            intel_dp_mode_min_output_bpp(connector, mode));
817
818         if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
819                 return MODE_H_ILLEGAL;
820
821         /*
822          * Output bpp is stored in U6.4 format, so right shift by 4 to get
823          * the integer value, since we support only integer bpp values.
824          */
825         if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
826             drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
827                 if (intel_dp_is_edp(intel_dp)) {
828                         dsc_max_output_bpp =
829                                 drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
830                         dsc_slice_count =
831                                 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
832                                                                 true);
833                 } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
834                         dsc_max_output_bpp =
835                                 intel_dp_dsc_get_output_bpp(dev_priv,
836                                                             max_link_clock,
837                                                             max_lanes,
838                                                             target_clock,
839                                                             mode->hdisplay,
840                                                             bigjoiner) >> 4;
841                         dsc_slice_count =
842                                 intel_dp_dsc_get_slice_count(intel_dp,
843                                                              target_clock,
844                                                              mode->hdisplay,
845                                                              bigjoiner);
846                 }
847
848                 dsc = dsc_max_output_bpp && dsc_slice_count;
849         }
850
851         /* big joiner configuration needs DSC */
852         if (bigjoiner && !dsc)
853                 return MODE_CLOCK_HIGH;
854
855         if (mode_rate > max_rate && !dsc)
856                 return MODE_CLOCK_HIGH;
857
858         status = intel_dp_mode_valid_downstream(intel_connector,
859                                                 mode, target_clock);
860         if (status != MODE_OK)
861                 return status;
862
863         return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
864 }
865
866 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
867 {
868         int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
869
870         return max_rate >= 540000;
871 }
872
873 bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
874 {
875         int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
876
877         return max_rate >= 810000;
878 }
879
880 static void
881 intel_dp_set_clock(struct intel_encoder *encoder,
882                    struct intel_crtc_state *pipe_config)
883 {
884         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
885         const struct dp_link_dpll *divisor = NULL;
886         int i, count = 0;
887
888         if (IS_G4X(dev_priv)) {
889                 divisor = g4x_dpll;
890                 count = ARRAY_SIZE(g4x_dpll);
891         } else if (HAS_PCH_SPLIT(dev_priv)) {
892                 divisor = pch_dpll;
893                 count = ARRAY_SIZE(pch_dpll);
894         } else if (IS_CHERRYVIEW(dev_priv)) {
895                 divisor = chv_dpll;
896                 count = ARRAY_SIZE(chv_dpll);
897         } else if (IS_VALLEYVIEW(dev_priv)) {
898                 divisor = vlv_dpll;
899                 count = ARRAY_SIZE(vlv_dpll);
900         }
901
902         if (divisor && count) {
903                 for (i = 0; i < count; i++) {
904                         if (pipe_config->port_clock == divisor[i].clock) {
905                                 pipe_config->dpll = divisor[i].dpll;
906                                 pipe_config->clock_set = true;
907                                 break;
908                         }
909                 }
910         }
911 }
912
913 static void snprintf_int_array(char *str, size_t len,
914                                const int *array, int nelem)
915 {
916         int i;
917
918         str[0] = '\0';
919
920         for (i = 0; i < nelem; i++) {
921                 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
922                 if (r >= len)
923                         return;
924                 str += r;
925                 len -= r;
926         }
927 }
928
929 static void intel_dp_print_rates(struct intel_dp *intel_dp)
930 {
931         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
932         char str[128]; /* FIXME: too big for stack? */
933
934         if (!drm_debug_enabled(DRM_UT_KMS))
935                 return;
936
937         snprintf_int_array(str, sizeof(str),
938                            intel_dp->source_rates, intel_dp->num_source_rates);
939         drm_dbg_kms(&i915->drm, "source rates: %s\n", str);
940
941         snprintf_int_array(str, sizeof(str),
942                            intel_dp->sink_rates, intel_dp->num_sink_rates);
943         drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);
944
945         snprintf_int_array(str, sizeof(str),
946                            intel_dp->common_rates, intel_dp->num_common_rates);
947         drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
948 }
949
950 int
951 intel_dp_max_link_rate(struct intel_dp *intel_dp)
952 {
953         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
954         int len;
955
956         len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
957         if (drm_WARN_ON(&i915->drm, len <= 0))
958                 return 162000;
959
960         return intel_dp->common_rates[len - 1];
961 }
962
963 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
964 {
965         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
966         int i = intel_dp_rate_index(intel_dp->sink_rates,
967                                     intel_dp->num_sink_rates, rate);
968
969         if (drm_WARN_ON(&i915->drm, i < 0))
970                 i = 0;
971
972         return i;
973 }
974
975 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
976                            u8 *link_bw, u8 *rate_select)
977 {
978         /* eDP 1.4 rate select method. */
979         if (intel_dp->use_rate_select) {
980                 *link_bw = 0;
981                 *rate_select =
982                         intel_dp_rate_select(intel_dp, port_clock);
983         } else {
984                 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
985                 *rate_select = 0;
986         }
987 }
988
989 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
990                                          const struct intel_crtc_state *pipe_config)
991 {
992         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
993
994         /* On TGL, FEC is supported on all Pipes */
995         if (INTEL_GEN(dev_priv) >= 12)
996                 return true;
997
998         if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
999                 return true;
1000
1001         return false;
1002 }
1003
1004 static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
1005                                   const struct intel_crtc_state *pipe_config)
1006 {
1007         return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
1008                 drm_dp_sink_supports_fec(intel_dp->fec_capable);
1009 }
1010
1011 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
1012                                   const struct intel_crtc_state *crtc_state)
1013 {
1014         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable)
1015                 return false;
1016
1017         return intel_dsc_source_support(crtc_state) &&
1018                 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
1019 }
1020
1021 static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp,
1022                                    const struct intel_crtc_state *crtc_state)
1023 {
1024         return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
1025                 (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
1026                  intel_dp->dfp.ycbcr_444_to_420);
1027 }
1028
1029 static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp,
1030                                     const struct intel_crtc_state *crtc_state, int bpc)
1031 {
1032         int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8;
1033
1034         if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state))
1035                 clock /= 2;
1036
1037         return clock;
1038 }
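/*
 * Example: a 594000 kHz 4k60 HDMI mode at 12 bpc gives a TMDS clock of
 * 594000 * 12 / 8 = 891000 kHz, halved to 445500 kHz for 4:2:0 output.
 */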
1039
1040 static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp,
1041                                            const struct intel_crtc_state *crtc_state, int bpc)
1042 {
1043         int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc);
1044
1045         if (intel_dp->dfp.min_tmds_clock &&
1046             tmds_clock < intel_dp->dfp.min_tmds_clock)
1047                 return false;
1048
1049         if (intel_dp->dfp.max_tmds_clock &&
1050             tmds_clock > intel_dp->dfp.max_tmds_clock)
1051                 return false;
1052
1053         return true;
1054 }
1055
1056 static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp,
1057                                               const struct intel_crtc_state *crtc_state,
1058                                               int bpc)
1059 {
1060
1061         return intel_hdmi_deep_color_possible(crtc_state, bpc,
1062                                               intel_dp->has_hdmi_sink,
1063                                               intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) &&
1064                 intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc);
1065 }
1066
1067 static int intel_dp_max_bpp(struct intel_dp *intel_dp,
1068                             const struct intel_crtc_state *crtc_state)
1069 {
1070         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1071         struct intel_connector *intel_connector = intel_dp->attached_connector;
1072         int bpp, bpc;
1073
1074         bpc = crtc_state->pipe_bpp / 3;
1075
1076         if (intel_dp->dfp.max_bpc)
1077                 bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);
1078
1079         if (intel_dp->dfp.min_tmds_clock) {
1080                 for (; bpc >= 10; bpc -= 2) {
1081                         if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc))
1082                                 break;
1083                 }
1084         }
1085
1086         bpp = bpc * 3;
1087         if (intel_dp_is_edp(intel_dp)) {
1088                 /* Get bpp from VBT only for panels that don't have bpp in EDID */
1089                 if (intel_connector->base.display_info.bpc == 0 &&
1090                     dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
1091                         drm_dbg_kms(&dev_priv->drm,
1092                                     "clamping bpp for eDP panel to BIOS-provided %i\n",
1093                                     dev_priv->vbt.edp.bpp);
1094                         bpp = dev_priv->vbt.edp.bpp;
1095                 }
1096         }
1097
1098         return bpp;
1099 }
1100
1101 /* Adjust link config limits based on compliance test requests. */
1102 void
1103 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
1104                                   struct intel_crtc_state *pipe_config,
1105                                   struct link_config_limits *limits)
1106 {
1107         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1108
1109         /* For DP Compliance we override the computed bpp for the pipe */
1110         if (intel_dp->compliance.test_data.bpc != 0) {
1111                 int bpp = 3 * intel_dp->compliance.test_data.bpc;
1112
1113                 limits->min_bpp = limits->max_bpp = bpp;
1114                 pipe_config->dither_force_disable = bpp == 6 * 3;
1115
1116                 drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
1117         }
1118
1119         /* Use values requested by Compliance Test Request */
1120         if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
1121                 int index;
1122
1123                 /* Validate the compliance test data since max values
1124                  * might have changed due to link train fallback.
1125                  */
1126                 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
1127                                                intel_dp->compliance.test_lane_count)) {
1128                         index = intel_dp_rate_index(intel_dp->common_rates,
1129                                                     intel_dp->num_common_rates,
1130                                                     intel_dp->compliance.test_link_rate);
1131                         if (index >= 0)
1132                                 limits->min_clock = limits->max_clock = index;
1133                         limits->min_lane_count = limits->max_lane_count =
1134                                 intel_dp->compliance.test_lane_count;
1135                 }
1136         }
1137 }
1138
1139 /* Optimize link config in order: max bpp, min clock, min lanes */
1140 static int
1141 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
1142                                   struct intel_crtc_state *pipe_config,
1143                                   const struct link_config_limits *limits)
1144 {
1145         struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
1146         int bpp, clock, lane_count;
1147         int mode_rate, link_clock, link_avail;
1148
1149         for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
1150                 int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
1151
1152                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1153                                                    output_bpp);
1154
1155                 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
1156                         for (lane_count = limits->min_lane_count;
1157                              lane_count <= limits->max_lane_count;
1158                              lane_count <<= 1) {
1159                                 link_clock = intel_dp->common_rates[clock];
1160                                 link_avail = intel_dp_max_data_rate(link_clock,
1161                                                                     lane_count);
1162
1163                                 if (mode_rate <= link_avail) {
1164                                         pipe_config->lane_count = lane_count;
1165                                         pipe_config->pipe_bpp = bpp;
1166                                         pipe_config->port_clock = link_clock;
1167
1168                                         return 0;
1169                                 }
1170                         }
1171                 }
1172         }
1173
1174         return -EINVAL;
1175 }
1176
1177 /* Optimize link config in order: max bpp, min lanes, min clock */
1178 static int
1179 intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
1180                                   struct intel_crtc_state *pipe_config,
1181                                   const struct link_config_limits *limits)
1182 {
1183         const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
1184         int bpp, clock, lane_count;
1185         int mode_rate, link_clock, link_avail;
1186
1187         for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
1188                 int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
1189
1190                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1191                                                    output_bpp);
1192
1193                 for (lane_count = limits->min_lane_count;
1194                      lane_count <= limits->max_lane_count;
1195                      lane_count <<= 1) {
1196                         for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
1197                                 link_clock = intel_dp->common_rates[clock];
1198                                 link_avail = intel_dp_max_data_rate(link_clock,
1199                                                                     lane_count);
1200
1201                                 if (mode_rate <= link_avail) {
1202                                         pipe_config->lane_count = lane_count;
1203                                         pipe_config->pipe_bpp = bpp;
1204                                         pipe_config->port_clock = link_clock;
1205
1206                                         return 0;
1207                                 }
1208                         }
1209                 }
1210         }
1211
1212         return -EINVAL;
1213 }
1214
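/*
 * Pick the first DSC input bpc reported by the sink's DPCD that does not
 * exceed dsc_max_bpc and convert it to a pipe bpp (3 components per
 * pixel); returns 0 if none qualifies.
 */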
1215 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
1216 {
1217         int i, num_bpc;
1218         u8 dsc_bpc[3] = {0};
1219
1220         num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
1221                                                        dsc_bpc);
1222         for (i = 0; i < num_bpc; i++) {
1223                 if (dsc_max_bpc >= dsc_bpc[i])
1224                         return dsc_bpc[i] * 3;
1225         }
1226
1227         return 0;
1228 }
1229
1230 #define DSC_SUPPORTED_VERSION_MIN               1
1231
1232 static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
1233                                        struct intel_crtc_state *crtc_state)
1234 {
1235         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
1236         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1237         struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1238         u8 line_buf_depth;
1239         int ret;
1240
1241         /*
1242          * RC_MODEL_SIZE is currently a constant across all configurations.
1243          *
1244          * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and
1245          * DP_DSC_RC_BUF_SIZE for this.
1246          */
1247         vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
1248
1249         ret = intel_dsc_compute_params(encoder, crtc_state);
1250         if (ret)
1251                 return ret;
1252
1253         /*
1254          * Slice Height of 8 works for all currently available panels. So start
1255          * with that if pic_height is an integral multiple of 8. Eventually add
1256          * logic to try multiple slice heights.
1257          */
1258         if (vdsc_cfg->pic_height % 8 == 0)
1259                 vdsc_cfg->slice_height = 8;
1260         else if (vdsc_cfg->pic_height % 4 == 0)
1261                 vdsc_cfg->slice_height = 4;
1262         else
1263                 vdsc_cfg->slice_height = 2;
1264
1265         vdsc_cfg->dsc_version_major =
1266                 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
1267                  DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
1268         vdsc_cfg->dsc_version_minor =
1269                 min(DSC_SUPPORTED_VERSION_MIN,
1270                     (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
1271                      DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);
1272
1273         vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
1274                 DP_DSC_RGB;
1275
1276         line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
1277         if (!line_buf_depth) {
1278                 drm_dbg_kms(&i915->drm,
1279                             "DSC Sink Line Buffer Depth invalid\n");
1280                 return -EINVAL;
1281         }
1282
1283         if (vdsc_cfg->dsc_version_minor == 2)
1284                 vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
1285                         DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
1286         else
1287                 vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
1288                         DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
1289
1290         vdsc_cfg->block_pred_enable =
1291                 intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
1292                 DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
1293
1294         return drm_dsc_compute_rc_parameters(vdsc_cfg);
1295 }
1296
1297 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
1298                                        struct intel_crtc_state *pipe_config,
1299                                        struct drm_connector_state *conn_state,
1300                                        struct link_config_limits *limits)
1301 {
1302         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1303         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
1304         const struct drm_display_mode *adjusted_mode =
1305                 &pipe_config->hw.adjusted_mode;
1306         u8 dsc_max_bpc;
1307         int pipe_bpp;
1308         int ret;
1309
1310         pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
1311                 intel_dp_supports_fec(intel_dp, pipe_config);
1312
1313         if (!intel_dp_supports_dsc(intel_dp, pipe_config))
1314                 return -EINVAL;
1315
1316         /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
1317         if (INTEL_GEN(dev_priv) >= 12)
1318                 dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
1319         else
1320                 dsc_max_bpc = min_t(u8, 10,
1321                                     conn_state->max_requested_bpc);
1322
1323         pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
1324
1325         /* Min Input BPC for ICL+ is 8 */
1326         if (pipe_bpp < 8 * 3) {
1327                 drm_dbg_kms(&dev_priv->drm,
1328                             "No DSC support for less than 8bpc\n");
1329                 return -EINVAL;
1330         }
1331
1332         /*
1333          * For now enable DSC for max bpp, max link rate, max lane count.
1334          * Optimize this later for the minimum possible link rate/lane count
1335          * with DSC enabled for the requested mode.
1336          */
1337         pipe_config->pipe_bpp = pipe_bpp;
1338         pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
1339         pipe_config->lane_count = limits->max_lane_count;
1340
1341         if (intel_dp_is_edp(intel_dp)) {
1342                 pipe_config->dsc.compressed_bpp =
1343                         min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
1344                               pipe_config->pipe_bpp);
1345                 pipe_config->dsc.slice_count =
1346                         drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
1347                                                         true);
1348         } else {
1349                 u16 dsc_max_output_bpp;
1350                 u8 dsc_dp_slice_count;
1351
1352                 dsc_max_output_bpp =
1353                         intel_dp_dsc_get_output_bpp(dev_priv,
1354                                                     pipe_config->port_clock,
1355                                                     pipe_config->lane_count,
1356                                                     adjusted_mode->crtc_clock,
1357                                                     adjusted_mode->crtc_hdisplay,
1358                                                     pipe_config->bigjoiner);
1359                 dsc_dp_slice_count =
1360                         intel_dp_dsc_get_slice_count(intel_dp,
1361                                                      adjusted_mode->crtc_clock,
1362                                                      adjusted_mode->crtc_hdisplay,
1363                                                      pipe_config->bigjoiner);
1364                 if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
1365                         drm_dbg_kms(&dev_priv->drm,
1366                                     "Compressed BPP/Slice Count not supported\n");
1367                         return -EINVAL;
1368                 }
1369                 pipe_config->dsc.compressed_bpp = min_t(u16,
1370                                                                dsc_max_output_bpp >> 4,
1371                                                                pipe_config->pipe_bpp);
1372                 pipe_config->dsc.slice_count = dsc_dp_slice_count;
1373         }
1374         /*
1375          * The VDSC engine operates at 1 pixel per clock, so if the peak pixel
1376          * rate exceeds the maximum CDCLK frequency (or big joiner is in use)
1377          * we need 2 VDSC instances, which requires a slice count of at least 2.
1378          */
1379         if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq ||
1380             pipe_config->bigjoiner) {
1381                 if (pipe_config->dsc.slice_count < 2) {
1382                         drm_dbg_kms(&dev_priv->drm,
1383                                     "Cannot split stream to use 2 VDSC instances\n");
1384                         return -EINVAL;
1385                 }
1386
1387                 pipe_config->dsc.dsc_split = true;
1388         }
1389
1390         ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
1391         if (ret < 0) {
1392                 drm_dbg_kms(&dev_priv->drm,
1393                             "Cannot compute valid DSC parameters for Input Bpp = %d "
1394                             "Compressed BPP = %d\n",
1395                             pipe_config->pipe_bpp,
1396                             pipe_config->dsc.compressed_bpp);
1397                 return ret;
1398         }
1399
1400         pipe_config->dsc.compression_enable = true;
1401         drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
1402                     "Compressed Bpp = %d Slice Count = %d\n",
1403                     pipe_config->pipe_bpp,
1404                     pipe_config->dsc.compressed_bpp,
1405                     pipe_config->dsc.slice_count);
1406
1407         return 0;
1408 }
1409
1410 static int
1411 intel_dp_compute_link_config(struct intel_encoder *encoder,
1412                              struct intel_crtc_state *pipe_config,
1413                              struct drm_connector_state *conn_state)
1414 {
1415         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
1416         const struct drm_display_mode *adjusted_mode =
1417                 &pipe_config->hw.adjusted_mode;
1418         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1419         struct link_config_limits limits;
1420         int common_len;
1421         int ret;
1422
1423         common_len = intel_dp_common_len_rate_limit(intel_dp,
1424                                                     intel_dp->max_link_rate);
1425
1426         /* No common link rates between source and sink */
1427         drm_WARN_ON(encoder->base.dev, common_len <= 0);
1428
1429         limits.min_clock = 0;
1430         limits.max_clock = common_len - 1;
1431
1432         limits.min_lane_count = 1;
1433         limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
1434
1435         limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
1436         limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config);
1437
1438         if (intel_dp->use_max_params) {
1439                 /*
1440                  * Use the maximum clock and number of lanes the eDP panel
1441                  * advertises being capable of in case the initial fast
1442                  * optimal params failed us. The panels are generally
1443                  * designed to support only a single clock and lane
1444                  * configuration, and typically on older panels these
1445                  * values correspond to the native resolution of the panel.
1446                  */
1447                 limits.min_lane_count = limits.max_lane_count;
1448                 limits.min_clock = limits.max_clock;
1449         }
1450
1451         intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
1452
1453         drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
1454                     "max rate %d max bpp %d pixel clock %iKHz\n",
1455                     limits.max_lane_count,
1456                     intel_dp->common_rates[limits.max_clock],
1457                     limits.max_bpp, adjusted_mode->crtc_clock);
1458
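        /*
         * Modes wider than 5120 pixels or with a dot clock above the single
         * pipe limit can only be driven by splitting the image across two
         * pipes (big joiner), provided the platform and port support it.
         */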
1459         if ((adjusted_mode->crtc_clock > i915->max_dotclk_freq ||
1460              adjusted_mode->crtc_hdisplay > 5120) &&
1461             intel_dp_can_bigjoiner(intel_dp))
1462                 pipe_config->bigjoiner = true;
1463
1464         if (intel_dp_is_edp(intel_dp))
1465                 /*
1466                  * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
1467                  * section A.1: "It is recommended that the minimum number of
1468                  * lanes be used, using the minimum link rate allowed for that
1469                  * lane configuration."
1470                  *
1471                  * Note that we fall back to the max clock and lane count for eDP
1472                  * panels that fail with the fast optimal settings (see
1473                  * intel_dp->use_max_params), in which case the fast vs. wide
1474                  * choice doesn't matter.
1475                  */
1476                 ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, &limits);
1477         else
1478                 /* Optimize for slow and wide. */
1479                 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
1480
1481         /* enable compression if the mode doesn't fit available BW */
1482         drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
1483         if (ret || intel_dp->force_dsc_en || pipe_config->bigjoiner) {
1484                 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
1485                                                   conn_state, &limits);
1486                 if (ret < 0)
1487                         return ret;
1488         }
1489
1490         if (pipe_config->dsc.compression_enable) {
1491                 drm_dbg_kms(&i915->drm,
1492                             "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
1493                             pipe_config->lane_count, pipe_config->port_clock,
1494                             pipe_config->pipe_bpp,
1495                             pipe_config->dsc.compressed_bpp);
1496
1497                 drm_dbg_kms(&i915->drm,
1498                             "DP link rate required %i available %i\n",
1499                             intel_dp_link_required(adjusted_mode->crtc_clock,
1500                                                    pipe_config->dsc.compressed_bpp),
1501                             intel_dp_max_data_rate(pipe_config->port_clock,
1502                                                    pipe_config->lane_count));
1503         } else {
1504                 drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
1505                             pipe_config->lane_count, pipe_config->port_clock,
1506                             pipe_config->pipe_bpp);
1507
1508                 drm_dbg_kms(&i915->drm,
1509                             "DP link rate required %i available %i\n",
1510                             intel_dp_link_required(adjusted_mode->crtc_clock,
1511                                                    pipe_config->pipe_bpp),
1512                             intel_dp_max_data_rate(pipe_config->port_clock,
1513                                                    pipe_config->lane_count));
1514         }
1515         return 0;
1516 }
1517
1518 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
1519                                   const struct drm_connector_state *conn_state)
1520 {
1521         const struct intel_digital_connector_state *intel_conn_state =
1522                 to_intel_digital_connector_state(conn_state);
1523         const struct drm_display_mode *adjusted_mode =
1524                 &crtc_state->hw.adjusted_mode;
1525
1526         /*
1527          * Our YCbCr output is always limited range.
1528          * crtc_state->limited_color_range only applies to RGB,
1529          * and it must never be set for YCbCr or we risk setting
1530          * some conflicting bits in PIPECONF which will mess up
1531          * the colors on the monitor.
1532          */
1533         if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
1534                 return false;
1535
1536         if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
1537                 /*
1538                  * See:
1539                  * CEA-861-E - 5.1 Default Encoding Parameters
1540                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1541                  */
1542                 return crtc_state->pipe_bpp != 18 &&
1543                         drm_default_rgb_quant_range(adjusted_mode) ==
1544                         HDMI_QUANTIZATION_RANGE_LIMITED;
1545         } else {
1546                 return intel_conn_state->broadcast_rgb ==
1547                         INTEL_BROADCAST_RGB_LIMITED;
1548         }
1549 }
1550
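/*
 * G4X has no DP audio support at all, and port A (eDP) only carries audio
 * on gen12+.
 */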
1551 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
1552                                     enum port port)
1553 {
1554         if (IS_G4X(dev_priv))
1555                 return false;
1556         if (INTEL_GEN(dev_priv) < 12 && port == PORT_A)
1557                 return false;
1558
1559         return true;
1560 }
1561
1562 static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
1563                                              const struct drm_connector_state *conn_state,
1564                                              struct drm_dp_vsc_sdp *vsc)
1565 {
1566         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1567         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1568
1569         /*
1570          * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
1571          * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
1572          * Colorimetry Format indication.
1573          */
1574         vsc->revision = 0x5;
1575         vsc->length = 0x13;
1576
1577         /* DP 1.4a spec, Table 2-120 */
1578         switch (crtc_state->output_format) {
1579         case INTEL_OUTPUT_FORMAT_YCBCR444:
1580                 vsc->pixelformat = DP_PIXELFORMAT_YUV444;
1581                 break;
1582         case INTEL_OUTPUT_FORMAT_YCBCR420:
1583                 vsc->pixelformat = DP_PIXELFORMAT_YUV420;
1584                 break;
1585         case INTEL_OUTPUT_FORMAT_RGB:
1586         default:
1587                 vsc->pixelformat = DP_PIXELFORMAT_RGB;
1588         }
1589
1590         switch (conn_state->colorspace) {
1591         case DRM_MODE_COLORIMETRY_BT709_YCC:
1592                 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
1593                 break;
1594         case DRM_MODE_COLORIMETRY_XVYCC_601:
1595                 vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
1596                 break;
1597         case DRM_MODE_COLORIMETRY_XVYCC_709:
1598                 vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
1599                 break;
1600         case DRM_MODE_COLORIMETRY_SYCC_601:
1601                 vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
1602                 break;
1603         case DRM_MODE_COLORIMETRY_OPYCC_601:
1604                 vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
1605                 break;
1606         case DRM_MODE_COLORIMETRY_BT2020_CYCC:
1607                 vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
1608                 break;
1609         case DRM_MODE_COLORIMETRY_BT2020_RGB:
1610                 vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
1611                 break;
1612         case DRM_MODE_COLORIMETRY_BT2020_YCC:
1613                 vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
1614                 break;
1615         case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
1616         case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
1617                 vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
1618                 break;
1619         default:
1620                 /*
1621                  * RGB->YCBCR color conversion uses the BT.709
1622                  * color space.
1623                  */
1624                 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
1625                         vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
1626                 else
1627                         vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
1628                 break;
1629         }
1630
1631         vsc->bpc = crtc_state->pipe_bpp / 3;
1632
1633         /* only RGB pixelformat supports 6 bpc */
1634         drm_WARN_ON(&dev_priv->drm,
1635                     vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);
1636
1637         /* all YCbCr are always limited range */
1638         vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
1639         vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
1640 }
1641
1642 static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
1643                                      struct intel_crtc_state *crtc_state,
1644                                      const struct drm_connector_state *conn_state)
1645 {
1646         struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;
1647
1648         /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
1649         if (crtc_state->has_psr)
1650                 return;
1651
1652         if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
1653                 return;
1654
1655         crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
1656         vsc->sdp_type = DP_SDP_VSC;
1657         intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
1658                                          &crtc_state->infoframes.vsc);
1659 }
1660
1661 void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
1662                                   const struct intel_crtc_state *crtc_state,
1663                                   const struct drm_connector_state *conn_state,
1664                                   struct drm_dp_vsc_sdp *vsc)
1665 {
1666         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1667
1668         vsc->sdp_type = DP_SDP_VSC;
1669
1670         if (dev_priv->psr.psr2_enabled) {
1671                 if (dev_priv->psr.colorimetry_support &&
1672                     intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
1673                         /* [PSR2, +Colorimetry] */
1674                         intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
1675                                                          vsc);
1676                 } else {
1677                         /*
1678                          * [PSR2, -Colorimetry]
1679                          * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
1680                          * 3D stereo + PSR/PSR2 + Y-coordinate.
1681                          */
1682                         vsc->revision = 0x4;
1683                         vsc->length = 0xe;
1684                 }
1685         } else {
1686                 /*
1687                  * [PSR1]
1688                  * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
1689                  * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
1690                  * higher).
1691                  */
1692                 vsc->revision = 0x2;
1693                 vsc->length = 0x8;
1694         }
1695 }
1696
1697 static void
1698 intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
1699                                             struct intel_crtc_state *crtc_state,
1700                                             const struct drm_connector_state *conn_state)
1701 {
1702         int ret;
1703         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1704         struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;
1705
1706         if (!conn_state->hdr_output_metadata)
1707                 return;
1708
1709         ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);
1710
1711         if (ret) {
1712                 drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
1713                 return;
1714         }
1715
1716         crtc_state->infoframes.enable |=
1717                 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
1718 }
1719
1720 static void
1721 intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
1722                              struct intel_crtc_state *pipe_config,
1723                              int output_bpp, bool constant_n)
1724 {
1725         struct intel_connector *intel_connector = intel_dp->attached_connector;
1726         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1727
1728         if (pipe_config->vrr.enable)
1729                 return;
1730
1731         /*
1732          * DRRS and PSR can't be enabled together, so give preference to PSR
1733          * as it allows more power savings by completely shutting down the
1734          * display. To guarantee this, intel_dp_drrs_compute_config() must be
1735          * called after intel_psr_compute_config().
1736          */
1737         if (pipe_config->has_psr)
1738                 return;
1739
1740         if (!intel_connector->panel.downclock_mode ||
1741             dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
1742                 return;
1743
1744         pipe_config->has_drrs = true;
1745         intel_link_compute_m_n(output_bpp, pipe_config->lane_count,
1746                                intel_connector->panel.downclock_mode->clock,
1747                                pipe_config->port_clock, &pipe_config->dp_m2_n2,
1748                                constant_n, pipe_config->fec_enable);
1749 }
1750
1751 int
1752 intel_dp_compute_config(struct intel_encoder *encoder,
1753                         struct intel_crtc_state *pipe_config,
1754                         struct drm_connector_state *conn_state)
1755 {
1756         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1757         struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
1758         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1759         enum port port = encoder->port;
1760         struct intel_connector *intel_connector = intel_dp->attached_connector;
1761         struct intel_digital_connector_state *intel_conn_state =
1762                 to_intel_digital_connector_state(conn_state);
1763         bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
1764         int ret = 0, output_bpp;
1765
1766         if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
1767                 pipe_config->has_pch_encoder = true;
1768
1769         pipe_config->output_format = intel_dp_output_format(&intel_connector->base,
1770                                                             adjusted_mode);
1771
1772         if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
1773                 ret = intel_pch_panel_fitting(pipe_config, conn_state);
1774                 if (ret)
1775                         return ret;
1776         }
1777
1778         if (!intel_dp_port_has_audio(dev_priv, port))
1779                 pipe_config->has_audio = false;
1780         else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
1781                 pipe_config->has_audio = intel_dp->has_audio;
1782         else
1783                 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
1784
1785         if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1786                 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1787                                        adjusted_mode);
1788
1789                 if (HAS_GMCH(dev_priv))
1790                         ret = intel_gmch_panel_fitting(pipe_config, conn_state);
1791                 else
1792                         ret = intel_pch_panel_fitting(pipe_config, conn_state);
1793                 if (ret)
1794                         return ret;
1795         }
1796
1797         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
1798                 return -EINVAL;
1799
1800         if (HAS_GMCH(dev_priv) &&
1801             adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
1802                 return -EINVAL;
1803
1804         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1805                 return -EINVAL;
1806
1807         if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
1808                 return -EINVAL;
1809
1810         ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
1811         if (ret < 0)
1812                 return ret;
1813
1814         pipe_config->limited_color_range =
1815                 intel_dp_limited_color_range(pipe_config, conn_state);
1816
1817         if (pipe_config->dsc.compression_enable)
1818                 output_bpp = pipe_config->dsc.compressed_bpp;
1819         else
1820                 output_bpp = intel_dp_output_bpp(pipe_config->output_format,
1821                                                  pipe_config->pipe_bpp);
1822
1823         intel_link_compute_m_n(output_bpp,
1824                                pipe_config->lane_count,
1825                                adjusted_mode->crtc_clock,
1826                                pipe_config->port_clock,
1827                                &pipe_config->dp_m_n,
1828                                constant_n, pipe_config->fec_enable);
1829
1830         if (!HAS_DDI(dev_priv))
1831                 intel_dp_set_clock(encoder, pipe_config);
1832
1833         intel_vrr_compute_config(pipe_config, conn_state);
1834         intel_psr_compute_config(intel_dp, pipe_config);
1835         intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
1836                                      constant_n);
1837         intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
1838         intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
1839
1840         return 0;
1841 }
1842
1843 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1844                               int link_rate, int lane_count)
1845 {
1846         intel_dp->link_trained = false;
1847         intel_dp->link_rate = link_rate;
1848         intel_dp->lane_count = lane_count;
1849 }
1850
1851 static void intel_dp_prepare(struct intel_encoder *encoder,
1852                              const struct intel_crtc_state *pipe_config)
1853 {
1854         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1855         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1856         enum port port = encoder->port;
1857         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
1858         const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
1859
1860         intel_dp_set_link_params(intel_dp,
1861                                  pipe_config->port_clock,
1862                                  pipe_config->lane_count);
1863
1864         /*
1865          * There are four kinds of DP registers:
1866          *
1867          *      IBX PCH
1868          *      SNB CPU
1869          *      IVB CPU
1870          *      CPT PCH
1871          *
1872          * IBX PCH and CPU are the same for almost everything,
1873          * except that the CPU DP PLL is configured in this
1874          * register
1875          *
1876          * CPT PCH is quite different, having many bits moved
1877          * to the TRANS_DP_CTL register instead. That
1878          * configuration happens (oddly) in ilk_pch_enable
1879          */
1880
1881         /* Preserve the BIOS-computed detected bit. This is
1882          * supposed to be read-only.
1883          */
1884         intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
1885
1886         /* Handle DP bits in common between all three register formats */
1887         intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1888         intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);
1889
1890         /* Split out the IBX/CPU vs CPT settings */
1891
1892         if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
1893                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1894                         intel_dp->DP |= DP_SYNC_HS_HIGH;
1895                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1896                         intel_dp->DP |= DP_SYNC_VS_HIGH;
1897                 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1898
1899                 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1900                         intel_dp->DP |= DP_ENHANCED_FRAMING;
1901
1902                 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
1903         } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
1904                 u32 trans_dp;
1905
1906                 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1907
1908                 trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
1909                 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1910                         trans_dp |= TRANS_DP_ENH_FRAMING;
1911                 else
1912                         trans_dp &= ~TRANS_DP_ENH_FRAMING;
1913                 intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
1914         } else {
1915                 if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
1916                         intel_dp->DP |= DP_COLOR_RANGE_16_235;
1917
1918                 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1919                         intel_dp->DP |= DP_SYNC_HS_HIGH;
1920                 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1921                         intel_dp->DP |= DP_SYNC_VS_HIGH;
1922                 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1923
1924                 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1925                         intel_dp->DP |= DP_ENHANCED_FRAMING;
1926
1927                 if (IS_CHERRYVIEW(dev_priv))
1928                         intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
1929                 else
1930                         intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
1931         }
1932 }
1933
1934
1935 /* Enable backlight PWM and backlight PP control. */
1936 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
1937                             const struct drm_connector_state *conn_state)
1938 {
1939         struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
1940         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1941
1942         if (!intel_dp_is_edp(intel_dp))
1943                 return;
1944
1945         drm_dbg_kms(&i915->drm, "\n");
1946
1947         intel_panel_enable_backlight(crtc_state, conn_state);
1948         intel_pps_backlight_on(intel_dp);
1949 }
1950
1951 /* Disable backlight PP control and backlight PWM. */
1952 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
1953 {
1954         struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
1955         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1956
1957         if (!intel_dp_is_edp(intel_dp))
1958                 return;
1959
1960         drm_dbg_kms(&i915->drm, "\n");
1961
1962         intel_pps_backlight_off(intel_dp);
1963         intel_panel_disable_backlight(old_conn_state);
1964 }
1965
1966 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
1967 {
1968         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1969         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
1970         bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;
1971
1972         I915_STATE_WARN(cur_state != state,
1973                         "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
1974                         dig_port->base.base.base.id, dig_port->base.base.name,
1975                         onoff(state), onoff(cur_state));
1976 }
1977 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
1978
1979 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
1980 {
1981         bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;
1982
1983         I915_STATE_WARN(cur_state != state,
1984                         "eDP PLL state assertion failure (expected %s, current %s)\n",
1985                         onoff(state), onoff(cur_state));
1986 }
1987 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
1988 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
1989
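/*
 * On ILK-IVB the eDP port A clock comes from a dedicated PLL controlled via
 * DP_A rather than a shared DPLL, so it is enabled and disabled here.
 */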
1990 static void ilk_edp_pll_on(struct intel_dp *intel_dp,
1991                            const struct intel_crtc_state *pipe_config)
1992 {
1993         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
1994         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1995
1996         assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
1997         assert_dp_port_disabled(intel_dp);
1998         assert_edp_pll_disabled(dev_priv);
1999
2000         drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n",
2001                     pipe_config->port_clock);
2002
2003         intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2004
2005         if (pipe_config->port_clock == 162000)
2006                 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2007         else
2008                 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2009
2010         intel_de_write(dev_priv, DP_A, intel_dp->DP);
2011         intel_de_posting_read(dev_priv, DP_A);
2012         udelay(500);
2013
2014         /*
2015          * [DevILK] Work around required when enabling DP PLL
2016          * while a pipe is enabled going to FDI:
2017          * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
2018          * 2. Program DP PLL enable
2019          */
2020         if (IS_GEN(dev_priv, 5))
2021                 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
2022
2023         intel_dp->DP |= DP_PLL_ENABLE;
2024
2025         intel_de_write(dev_priv, DP_A, intel_dp->DP);
2026         intel_de_posting_read(dev_priv, DP_A);
2027         udelay(200);
2028 }
2029
2030 static void ilk_edp_pll_off(struct intel_dp *intel_dp,
2031                             const struct intel_crtc_state *old_crtc_state)
2032 {
2033         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
2034         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2035
2036         assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
2037         assert_dp_port_disabled(intel_dp);
2038         assert_edp_pll_enabled(dev_priv);
2039
2040         drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n");
2041
2042         intel_dp->DP &= ~DP_PLL_ENABLE;
2043
2044         intel_de_write(dev_priv, DP_A, intel_dp->DP);
2045         intel_de_posting_read(dev_priv, DP_A);
2046         udelay(200);
2047 }
2048
2049 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
2050 {
2051         /*
2052          * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
2053          * be capable of signalling downstream hpd with a long pulse.
2054          * Whether or not that means D3 is safe to use is not clear,
2055          * but let's assume so until proven otherwise.
2056          *
2057          * FIXME should really check all downstream ports...
2058          */
2059         return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
2060                 drm_dp_is_branch(intel_dp->dpcd) &&
2061                 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
2062 }
2063
2064 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
2065                                            const struct intel_crtc_state *crtc_state,
2066                                            bool enable)
2067 {
2068         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2069         int ret;
2070
2071         if (!crtc_state->dsc.compression_enable)
2072                 return;
2073
2074         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
2075                                  enable ? DP_DECOMPRESSION_EN : 0);
2076         if (ret < 0)
2077                 drm_dbg_kms(&i915->drm,
2078                             "Failed to %s sink decompression state\n",
2079                             enable ? "enable" : "disable");
2080 }
2081
2082 static void
2083 intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
2084 {
2085         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
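        /* Intel's source OUI (00-AA-01) */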
2086         u8 oui[] = { 0x00, 0xaa, 0x01 };
2087         u8 buf[3] = { 0 };
2088
2089         /*
2090          * During driver init, we want to be careful and avoid changing the source OUI if it's
2091          * already set to what we want, so as to avoid clearing any state by accident
2092          */
2093         if (careful) {
2094                 if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0)
2095                         drm_err(&i915->drm, "Failed to read source OUI\n");
2096
2097                 if (memcmp(oui, buf, sizeof(oui)) == 0)
2098                         return;
2099         }
2100
2101         if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
2102                 drm_err(&i915->drm, "Failed to write source OUI\n");
2103 }
2104
2105 /* If the device supports it, try to set the power state appropriately */
2106 void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode)
2107 {
2108         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
2109         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2110         int ret, i;
2111
2112         /* Should have a valid DPCD by this point */
2113         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2114                 return;
2115
2116         if (mode != DP_SET_POWER_D0) {
2117                 if (downstream_hpd_needs_d0(intel_dp))
2118                         return;
2119
2120                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
2121         } else {
2122                 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
2123
2124                 lspcon_resume(dp_to_dig_port(intel_dp));
2125
2126                 /* Write the source OUI as early as possible */
2127                 if (intel_dp_is_edp(intel_dp))
2128                         intel_edp_init_source_oui(intel_dp, false);
2129
2130                 /*
2131                  * When turning on, we need to retry for 1ms to give the sink
2132                  * time to wake up.
2133                  */
2134                 for (i = 0; i < 3; i++) {
2135                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode);
2136                         if (ret == 1)
2137                                 break;
2138                         msleep(1);
2139                 }
2140
2141                 if (ret == 1 && lspcon->active)
2142                         lspcon_wait_pcon_mode(lspcon);
2143         }
2144
2145         if (ret != 1)
2146                 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n",
2147                             encoder->base.base.id, encoder->base.name,
2148                             mode == DP_SET_POWER_D0 ? "D0" : "D3");
2149 }
2150
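/*
 * On CPT/PPT PCH the DP port to transcoder routing lives in TRANS_DP_CTL,
 * so scan the transcoders to find which pipe (if any) has this port selected.
 */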
2151 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
2152                                  enum port port, enum pipe *pipe)
2153 {
2154         enum pipe p;
2155
2156         for_each_pipe(dev_priv, p) {
2157                 u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p));
2158
2159                 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
2160                         *pipe = p;
2161                         return true;
2162                 }
2163         }
2164
2165         drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n",
2166                     port_name(port));
2167
2168         /* must initialize pipe to something for the asserts */
2169         *pipe = PIPE_A;
2170
2171         return false;
2172 }
2173
2174 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
2175                            i915_reg_t dp_reg, enum port port,
2176                            enum pipe *pipe)
2177 {
2178         bool ret;
2179         u32 val;
2180
2181         val = intel_de_read(dev_priv, dp_reg);
2182
2183         ret = val & DP_PORT_EN;
2184
2185         /* asserts want to know the pipe even if the port is disabled */
2186         if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
2187                 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
2188         else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
2189                 ret &= cpt_dp_port_selected(dev_priv, port, pipe);
2190         else if (IS_CHERRYVIEW(dev_priv))
2191                 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
2192         else
2193                 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
2194
2195         return ret;
2196 }
2197
2198 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2199                                   enum pipe *pipe)
2200 {
2201         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2202         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2203         intel_wakeref_t wakeref;
2204         bool ret;
2205
2206         wakeref = intel_display_power_get_if_enabled(dev_priv,
2207                                                      encoder->power_domain);
2208         if (!wakeref)
2209                 return false;
2210
2211         ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
2212                                     encoder->port, pipe);
2213
2214         intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
2215
2216         return ret;
2217 }
2218
2219 static void intel_dp_get_config(struct intel_encoder *encoder,
2220                                 struct intel_crtc_state *pipe_config)
2221 {
2222         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2223         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2224         u32 tmp, flags = 0;
2225         enum port port = encoder->port;
2226         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
2227
2228         if (encoder->type == INTEL_OUTPUT_EDP)
2229                 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
2230         else
2231                 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
2232
2233         tmp = intel_de_read(dev_priv, intel_dp->output_reg);
2234
2235         pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2236
2237         if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
2238                 u32 trans_dp = intel_de_read(dev_priv,
2239                                              TRANS_DP_CTL(crtc->pipe));
2240
2241                 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2242                         flags |= DRM_MODE_FLAG_PHSYNC;
2243                 else
2244                         flags |= DRM_MODE_FLAG_NHSYNC;
2245
2246                 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2247                         flags |= DRM_MODE_FLAG_PVSYNC;
2248                 else
2249                         flags |= DRM_MODE_FLAG_NVSYNC;
2250         } else {
2251                 if (tmp & DP_SYNC_HS_HIGH)
2252                         flags |= DRM_MODE_FLAG_PHSYNC;
2253                 else
2254                         flags |= DRM_MODE_FLAG_NHSYNC;
2255
2256                 if (tmp & DP_SYNC_VS_HIGH)
2257                         flags |= DRM_MODE_FLAG_PVSYNC;
2258                 else
2259                         flags |= DRM_MODE_FLAG_NVSYNC;
2260         }
2261
2262         pipe_config->hw.adjusted_mode.flags |= flags;
2263
2264         if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
2265                 pipe_config->limited_color_range = true;
2266
2267         pipe_config->lane_count =
2268                 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2269
2270         intel_dp_get_m_n(crtc, pipe_config);
2271
2272         if (port == PORT_A) {
2273                 if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
2274                         pipe_config->port_clock = 162000;
2275                 else
2276                         pipe_config->port_clock = 270000;
2277         }
2278
2279         pipe_config->hw.adjusted_mode.crtc_clock =
2280                 intel_dotclock_calculate(pipe_config->port_clock,
2281                                          &pipe_config->dp_m_n);
2282
2283         if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
2284             pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
2285                 /*
2286                  * This is a big fat ugly hack.
2287                  *
2288                  * Some machines in UEFI boot mode provide us a VBT that has 18
2289                  * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2290                  * unknown we fail to light up. Yet the same BIOS boots up with
2291                  * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2292                  * max, not what it tells us to use.
2293                  *
2294                  * Note: This will still be broken if the eDP panel is not lit
2295                  * up by the BIOS, and thus we can't get the mode at module
2296                  * load.
2297                  */
2298                 drm_dbg_kms(&dev_priv->drm,
2299                             "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2300                             pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
2301                 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
2302         }
2303 }
2304
2305 static bool
2306 intel_dp_get_dpcd(struct intel_dp *intel_dp);
2307
2308 /**
2309  * intel_dp_sync_state - sync the encoder state during init/resume
2310  * @encoder: intel encoder to sync
2311  * @crtc_state: state for the CRTC connected to the encoder
2312  *
2313  * Sync any state stored in the encoder wrt. HW state during driver init
2314  * and system resume.
2315  */
2316 void intel_dp_sync_state(struct intel_encoder *encoder,
2317                          const struct intel_crtc_state *crtc_state)
2318 {
2319         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2320
2321         /*
2322          * Don't clobber DPCD if it's been already read out during output
2323          * setup (eDP) or detect.
2324          */
2325         if (intel_dp->dpcd[DP_DPCD_REV] == 0)
2326                 intel_dp_get_dpcd(intel_dp);
2327
2328         intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
2329         intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
2330 }
2331
2332 bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
2333                                     struct intel_crtc_state *crtc_state)
2334 {
2335         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2336         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2337
2338         /*
2339          * If BIOS has set an unsupported or non-standard link rate for some
2340          * reason force an encoder recompute and full modeset.
2341          */
2342         if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates,
2343                                 crtc_state->port_clock) < 0) {
2344                 drm_dbg_kms(&i915->drm, "Forcing full modeset due to unsupported link rate\n");
2345                 crtc_state->uapi.connectors_changed = true;
2346                 return false;
2347         }
2348
2349         /*
2350          * FIXME hack to force full modeset when DSC is being used.
2351          *
2352          * As long as we do not have full state readout and config comparison
2353          * of crtc_state->dsc, we have no way to ensure reliable fastset.
2354          * Remove once we have readout for DSC.
2355          */
2356         if (crtc_state->dsc.compression_enable) {
2357                 drm_dbg_kms(&i915->drm, "Forcing full modeset due to DSC being enabled\n");
2358                 crtc_state->uapi.mode_changed = true;
2359                 return false;
2360         }
2361
2362         if (CAN_PSR(i915) && intel_dp_is_edp(intel_dp)) {
2363                 drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n");
2364                 crtc_state->uapi.mode_changed = true;
2365                 return false;
2366         }
2367
2368         return true;
2369 }
2370
2371 static void intel_disable_dp(struct intel_atomic_state *state,
2372                              struct intel_encoder *encoder,
2373                              const struct intel_crtc_state *old_crtc_state,
2374                              const struct drm_connector_state *old_conn_state)
2375 {
2376         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2377
2378         intel_dp->link_trained = false;
2379
2380         if (old_crtc_state->has_audio)
2381                 intel_audio_codec_disable(encoder,
2382                                           old_crtc_state, old_conn_state);
2383
2384         /* Make sure the panel is off before trying to change the mode. But also
2385          * ensure that we have vdd while we switch off the panel. */
2386         intel_pps_vdd_on(intel_dp);
2387         intel_edp_backlight_off(old_conn_state);
2388         intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
2389         intel_pps_off(intel_dp);
2390         intel_dp->frl.is_trained = false;
2391         intel_dp->frl.trained_rate_gbps = 0;
2392 }
2393
2394 static void g4x_disable_dp(struct intel_atomic_state *state,
2395                            struct intel_encoder *encoder,
2396                            const struct intel_crtc_state *old_crtc_state,
2397                            const struct drm_connector_state *old_conn_state)
2398 {
2399         intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
2400 }
2401
2402 static void vlv_disable_dp(struct intel_atomic_state *state,
2403                            struct intel_encoder *encoder,
2404                            const struct intel_crtc_state *old_crtc_state,
2405                            const struct drm_connector_state *old_conn_state)
2406 {
2407         intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
2408 }
2409
2410 static void g4x_post_disable_dp(struct intel_atomic_state *state,
2411                                 struct intel_encoder *encoder,
2412                                 const struct intel_crtc_state *old_crtc_state,
2413                                 const struct drm_connector_state *old_conn_state)
2414 {
2415         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2416         enum port port = encoder->port;
2417
2418         /*
2419          * Bspec does not list a specific disable sequence for g4x DP.
2420          * Follow the ilk+ sequence (disable pipe before the port) for
2421          * g4x DP as it does not suffer from underruns like the normal
2422          * g4x modeset sequence (disable pipe after the port).
2423          */
2424         intel_dp_link_down(encoder, old_crtc_state);
2425
2426         /* Only ilk+ has port A */
2427         if (port == PORT_A)
2428                 ilk_edp_pll_off(intel_dp, old_crtc_state);
2429 }
2430
2431 static void vlv_post_disable_dp(struct intel_atomic_state *state,
2432                                 struct intel_encoder *encoder,
2433                                 const struct intel_crtc_state *old_crtc_state,
2434                                 const struct drm_connector_state *old_conn_state)
2435 {
2436         intel_dp_link_down(encoder, old_crtc_state);
2437 }
2438
2439 static void chv_post_disable_dp(struct intel_atomic_state *state,
2440                                 struct intel_encoder *encoder,
2441                                 const struct intel_crtc_state *old_crtc_state,
2442                                 const struct drm_connector_state *old_conn_state)
2443 {
2444         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2445
2446         intel_dp_link_down(encoder, old_crtc_state);
2447
2448         vlv_dpio_get(dev_priv);
2449
2450         /* Assert data lane reset */
2451         chv_data_lane_soft_reset(encoder, old_crtc_state, true);
2452
2453         vlv_dpio_put(dev_priv);
2454 }
2455
2456 static void
2457 cpt_set_link_train(struct intel_dp *intel_dp,
2458                    const struct intel_crtc_state *crtc_state,
2459                    u8 dp_train_pat)
2460 {
2461         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2462         u32 *DP = &intel_dp->DP;
2463
2464         *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2465
2466         switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
2467         case DP_TRAINING_PATTERN_DISABLE:
2468                 *DP |= DP_LINK_TRAIN_OFF_CPT;
2469                 break;
2470         case DP_TRAINING_PATTERN_1:
2471                 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2472                 break;
2473         case DP_TRAINING_PATTERN_2:
2474                 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2475                 break;
2476         case DP_TRAINING_PATTERN_3:
2477                 drm_dbg_kms(&dev_priv->drm,
2478                             "TPS3 not supported, using TPS2 instead\n");
2479                 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2480                 break;
2481         }
2482
2483         intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
2484         intel_de_posting_read(dev_priv, intel_dp->output_reg);
2485 }
2486
2487 static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp)
2488 {
2489         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2490
2491         /* Clear the cached register set to avoid using stale values */
2492
2493         memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd));
2494
2495         if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER,
2496                              intel_dp->pcon_dsc_dpcd,
2497                              sizeof(intel_dp->pcon_dsc_dpcd)) < 0)
2498                 drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n",
2499                         DP_PCON_DSC_ENCODER);
2500
2501         drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n",
2502                     (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd);
2503 }
2504
2505 static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask)
2506 {
2507         int bw_gbps[] = {9, 18, 24, 32, 40, 48};
2508         int i;
2509
2510         for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) {
2511                 if (frl_bw_mask & (1 << i))
2512                         return bw_gbps[i];
2513         }
2514         return 0;
2515 }
2516
2517 static int intel_dp_pcon_set_frl_mask(int max_frl)
2518 {
2519         switch (max_frl) {
2520         case 48:
2521                 return DP_PCON_FRL_BW_MASK_48GBPS;
2522         case 40:
2523                 return DP_PCON_FRL_BW_MASK_40GBPS;
2524         case 32:
2525                 return DP_PCON_FRL_BW_MASK_32GBPS;
2526         case 24:
2527                 return DP_PCON_FRL_BW_MASK_24GBPS;
2528         case 18:
2529                 return DP_PCON_FRL_BW_MASK_18GBPS;
2530         case 9:
2531                 return DP_PCON_FRL_BW_MASK_9GBPS;
2532         }
2533
2534         return 0;
2535 }
2536
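/*
 * Max FRL bandwidth the HDMI sink advertises in its EDID: lane count times
 * per-lane rate in Gbps, further capped by the sink's DSC FRL caps when it
 * supports HDMI 2.1 DSC 1.2.
 */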
2537 static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp)
2538 {
2539         struct intel_connector *intel_connector = intel_dp->attached_connector;
2540         struct drm_connector *connector = &intel_connector->base;
2541         int max_frl_rate;
2542         int max_lanes, rate_per_lane;
2543         int max_dsc_lanes, dsc_rate_per_lane;
2544
2545         max_lanes = connector->display_info.hdmi.max_lanes;
2546         rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane;
2547         max_frl_rate = max_lanes * rate_per_lane;
2548
2549         if (connector->display_info.hdmi.dsc_cap.v_1p2) {
2550                 max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes;
2551                 dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane;
2552                 if (max_dsc_lanes && dsc_rate_per_lane)
2553                         max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane);
2554         }
2555
2556         return max_frl_rate;
2557 }
2558
2559 static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
2560 {
2561 #define PCON_EXTENDED_TRAIN_MODE (1 > 0)
2562 #define PCON_CONCURRENT_MODE (1 > 0)
2563 #define PCON_SEQUENTIAL_MODE !PCON_CONCURRENT_MODE
2564 #define PCON_NORMAL_TRAIN_MODE !PCON_EXTENDED_TRAIN_MODE
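/*
 * The (1 > 0) style defines above are simply boolean constants; the sequence
 * below requests sequential FRL training in normal (non-extended) mode.
 */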
2565 #define TIMEOUT_FRL_READY_MS 500
2566 #define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000
2567
2568         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2569         int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret;
2570         u8 max_frl_bw_mask = 0, frl_trained_mask;
2571         bool is_active;
2572
2573         ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
2574         if (ret < 0)
2575                 return ret;
2576
2577         max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
2578         drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw);
2579
2580         max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp);
2581         drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw);
2582
2583         max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw);
2584
2585         if (max_frl_bw <= 0)
2586                 return -EINVAL;
2587
2588         ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false);
2589         if (ret < 0)
2590                 return ret;
2591         /* Wait for PCON to be FRL Ready */
2592         wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);
2593
2594         if (!is_active)
2595                 return -ETIMEDOUT;
2596
2597         max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw);
2598         ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw, PCON_SEQUENTIAL_MODE);
2599         if (ret < 0)
2600                 return ret;
2601         ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask, PCON_NORMAL_TRAIN_MODE);
2602         if (ret < 0)
2603                 return ret;
2604         ret = drm_dp_pcon_frl_enable(&intel_dp->aux);
2605         if (ret < 0)
2606                 return ret;
2607         /*
2608          * Wait for FRL to be completed
2609          * Check if the HDMI Link is up and active.
2610          */
2611         wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS);
2612
2613         if (!is_active)
2614                 return -ETIMEDOUT;
2615
2616         /* Verify HDMI Link configuration shows FRL Mode */
2617         if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) !=
2618             DP_PCON_HDMI_MODE_FRL) {
2619                 drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n");
2620                 return -EINVAL;
2621         }
2622         drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask);
2623
2624         intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask);
2625         intel_dp->frl.is_trained = true;
2626         drm_dbg(&i915->drm, "FRL trained with: %d Gbps\n", intel_dp->frl.trained_rate_gbps);
2627
2628         return 0;
2629 }
2630
2631 static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp)
2632 {
2633         if (drm_dp_is_branch(intel_dp->dpcd) &&
2634             intel_dp->has_hdmi_sink &&
2635             intel_dp_hdmi_sink_max_frl(intel_dp) > 0)
2636                 return true;
2637
2638         return false;
2639 }
2640
2641 void intel_dp_check_frl_training(struct intel_dp *intel_dp)
2642 {
2643         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2644
2645         /* Always go for FRL training if supported */
2646         if (!intel_dp_is_hdmi_2_1_sink(intel_dp) ||
2647             intel_dp->frl.is_trained)
2648                 return;
2649
2650         if (intel_dp_pcon_start_frl_training(intel_dp) < 0) {
2651                 int ret, mode;
2652
2653                 drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n");
2654                 ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux);
2655                 mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL);
2656
2657                 if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS)
2658                         drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n");
2659         } else {
2660                 drm_dbg(&dev_priv->drm, "FRL training completed\n");
2661         }
2662 }
2663
2664 static int
2665 intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state)
2666 {
2667         int vactive = crtc_state->hw.adjusted_mode.vdisplay;
2668
2669         return intel_hdmi_dsc_get_slice_height(vactive);
2670 }
2671
2672 static int
2673 intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp,
2674                              const struct intel_crtc_state *crtc_state)
2675 {
2676         struct intel_connector *intel_connector = intel_dp->attached_connector;
2677         struct drm_connector *connector = &intel_connector->base;
2678         int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice;
2679         int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices;
2680         int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd);
2681         int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd);
2682
2683         return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices,
2684                                              pcon_max_slice_width,
2685                                              hdmi_max_slices, hdmi_throughput);
2686 }
2687
2688 static int
2689 intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp,
2690                           const struct intel_crtc_state *crtc_state,
2691                           int num_slices, int slice_width)
2692 {
2693         struct intel_connector *intel_connector = intel_dp->attached_connector;
2694         struct drm_connector *connector = &intel_connector->base;
2695         int output_format = crtc_state->output_format;
2696         bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp;
2697         int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd);
2698         int hdmi_max_chunk_bytes =
2699                 connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024;
2700
2701         return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width,
2702                                       num_slices, output_format, hdmi_all_bpp,
2703                                       hdmi_max_chunk_bytes);
2704 }
2705
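/*
 * Configure the PCON's DSC encoder for an HDMI 2.1 sink: compute the slice
 * height, slice count and bits-per-pixel for the current mode and override
 * the PCON's PPS parameters over DPCD.
 */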
2706 void
2707 intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
2708                             const struct intel_crtc_state *crtc_state)
2709 {
2710         u8 pps_param[6];
2711         int slice_height;
2712         int slice_width;
2713         int num_slices;
2714         int bits_per_pixel;
2715         int ret;
2716         struct intel_connector *intel_connector = intel_dp->attached_connector;
2717         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2718         struct drm_connector *connector;
2719         bool hdmi_is_dsc_1_2;
2720
2721         if (!intel_dp_is_hdmi_2_1_sink(intel_dp))
2722                 return;
2723
2724         if (!intel_connector)
2725                 return;
2726         connector = &intel_connector->base;
2727         hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2;
2728
2729         if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) ||
2730             !hdmi_is_dsc_1_2)
2731                 return;
2732
2733         slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state);
2734         if (!slice_height)
2735                 return;
2736
2737         num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state);
2738         if (!num_slices)
2739                 return;
2740
2741         slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay,
2742                                    num_slices);
2743
2744         bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state,
2745                                                    num_slices, slice_width);
2746         if (!bits_per_pixel)
2747                 return;
2748
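        /*
         * PPS override parameters: slice height and slice width as
         * little-endian 16-bit values, followed by the 10-bit
         * bits_per_pixel value split across the last two bytes.
         */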
2749         pps_param[0] = slice_height & 0xFF;
2750         pps_param[1] = slice_height >> 8;
2751         pps_param[2] = slice_width & 0xFF;
2752         pps_param[3] = slice_width >> 8;
2753         pps_param[4] = bits_per_pixel & 0xFF;
2754         pps_param[5] = (bits_per_pixel >> 8) & 0x3;
2755
2756         ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param);
2757         if (ret < 0)
2758                 drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n");
2759 }
2760
2761 static void
2762 g4x_set_link_train(struct intel_dp *intel_dp,
2763                    const struct intel_crtc_state *crtc_state,
2764                    u8 dp_train_pat)
2765 {
2766         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2767         u32 *DP = &intel_dp->DP;
2768
2769         *DP &= ~DP_LINK_TRAIN_MASK;
2770
2771         switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
2772         case DP_TRAINING_PATTERN_DISABLE:
2773                 *DP |= DP_LINK_TRAIN_OFF;
2774                 break;
2775         case DP_TRAINING_PATTERN_1:
2776                 *DP |= DP_LINK_TRAIN_PAT_1;
2777                 break;
2778         case DP_TRAINING_PATTERN_2:
2779                 *DP |= DP_LINK_TRAIN_PAT_2;
2780                 break;
2781         case DP_TRAINING_PATTERN_3:
2782                 drm_dbg_kms(&dev_priv->drm,
2783                             "TPS3 not supported, using TPS2 instead\n");
2784                 *DP |= DP_LINK_TRAIN_PAT_2;
2785                 break;
2786         }
2787
2788         intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
2789         intel_de_posting_read(dev_priv, intel_dp->output_reg);
2790 }
2791
2792 static void intel_dp_enable_port(struct intel_dp *intel_dp,
2793                                  const struct intel_crtc_state *crtc_state)
2794 {
2795         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2796
2797         /* enable with pattern 1 (as per spec) */
2798
2799         intel_dp_program_link_training_pattern(intel_dp, crtc_state,
2800                                                DP_TRAINING_PATTERN_1);
2801
2802         /*
2803          * Magic for VLV/CHV. We _must_ first set up the register
2804          * without actually enabling the port, and then do another
2805          * write to enable the port. Otherwise link training will
2806          * fail when the power sequencer is freshly used for this port.
2807          */
2808         intel_dp->DP |= DP_PORT_EN;
2809         if (crtc_state->has_audio)
2810                 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
2811
2812         intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
2813         intel_de_posting_read(dev_priv, intel_dp->output_reg);
2814 }
2815
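/*
 * Program the DP branch device's protocol converter controls over DPCD:
 * HDMI vs. DVI output mode, YCbCr 4:4:4 to 4:2:0 down-conversion, and, when
 * supported, RGB->YCbCr conversion matched to the selected colorimetry.
 */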
2816 void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
2817                                            const struct intel_crtc_state *crtc_state)
2818 {
2819         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2820         u8 tmp;
2821
2822         if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
2823                 return;
2824
2825         if (!drm_dp_is_branch(intel_dp->dpcd))
2826                 return;
2827
2828         tmp = intel_dp->has_hdmi_sink ?
2829                 DP_HDMI_DVI_OUTPUT_CONFIG : 0;
2830
2831         if (drm_dp_dpcd_writeb(&intel_dp->aux,
2832                                DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
2833                 drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
2834                             enableddisabled(intel_dp->has_hdmi_sink));
2835
2836         tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
2837                 intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
2838
2839         if (drm_dp_dpcd_writeb(&intel_dp->aux,
2840                                DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
2841                 drm_dbg_kms(&i915->drm,
2842                             "Failed to set protocol converter YCbCr 4:2:0 conversion mode to %s\n",
2843                             enableddisabled(intel_dp->dfp.ycbcr_444_to_420));
2844
2845         tmp = 0;
2846         if (intel_dp->dfp.rgb_to_ycbcr) {
2847                 bool bt2020, bt709;
2848
2849                 /*
2850                  * FIXME: If userspace selects BT2020 or BT709 but the PCON supports
2851                  * RGB->YCbCr conversion only for BT601, we currently fall back to
2852                  * BT601 as the default.
2853                  */
2854                 tmp = DP_CONVERSION_BT601_RGB_YCBCR_ENABLE;
2855
2856                 bt2020 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
2857                                                                    intel_dp->downstream_ports,
2858                                                                    DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
2859                 bt709 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
2860                                                                   intel_dp->downstream_ports,
2861                                                                   DP_DS_HDMI_BT709_RGB_YCBCR_CONV);
2862                 switch (crtc_state->infoframes.vsc.colorimetry) {
2863                 case DP_COLORIMETRY_BT2020_RGB:
2864                 case DP_COLORIMETRY_BT2020_YCC:
2865                         if (bt2020)
2866                                 tmp = DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE;
2867                         break;
2868                 case DP_COLORIMETRY_BT709_YCC:
2869                 case DP_COLORIMETRY_XVYCC_709:
2870                         if (bt709)
2871                                 tmp = DP_CONVERSION_BT709_RGB_YCBCR_ENABLE;
2872                         break;
2873                 default:
2874                         break;
2875                 }
2876         }
2877
2878         if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
2879                 drm_dbg_kms(&i915->drm,
2880                            "Failed to set protocol converter RGB->YCbCr conversion mode to %s\n",
2881                            enableddisabled(tmp ? true : false));
2882 }
2883
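/*
 * Legacy (g4x/VLV/CHV) DP port enable sequence: enable the port with
 * training pattern 1 under the PPS lock, power up the sink (D0), configure
 * any protocol converter, then run link training and finally enable audio.
 */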
2884 static void intel_enable_dp(struct intel_atomic_state *state,
2885                             struct intel_encoder *encoder,
2886                             const struct intel_crtc_state *pipe_config,
2887                             const struct drm_connector_state *conn_state)
2888 {
2889         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2890         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2891         struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
2892         u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
2893         enum pipe pipe = crtc->pipe;
2894         intel_wakeref_t wakeref;
2895
2896         if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
2897                 return;
2898
2899         with_intel_pps_lock(intel_dp, wakeref) {
2900                 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2901                         vlv_pps_init(encoder, pipe_config);
2902
2903                 intel_dp_enable_port(intel_dp, pipe_config);
2904
2905                 intel_pps_vdd_on_unlocked(intel_dp);
2906                 intel_pps_on_unlocked(intel_dp);
2907                 intel_pps_vdd_off_unlocked(intel_dp, true);
2908         }
2909
2910         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
2911                 unsigned int lane_mask = 0x0;
2912
2913                 if (IS_CHERRYVIEW(dev_priv))
2914                         lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
2915
2916                 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2917                                     lane_mask);
2918         }
2919
2920         intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
2921         intel_dp_configure_protocol_converter(intel_dp, pipe_config);
2922         intel_dp_check_frl_training(intel_dp);
2923         intel_dp_pcon_dsc_configure(intel_dp, pipe_config);
2924         intel_dp_start_link_train(intel_dp, pipe_config);
2925         intel_dp_stop_link_train(intel_dp, pipe_config);
2926
2927         if (pipe_config->has_audio) {
2928                 drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n",
2929                         pipe_name(pipe));
2930                 intel_audio_codec_enable(encoder, pipe_config, conn_state);
2931         }
2932 }
2933
2934 static void g4x_enable_dp(struct intel_atomic_state *state,
2935                           struct intel_encoder *encoder,
2936                           const struct intel_crtc_state *pipe_config,
2937                           const struct drm_connector_state *conn_state)
2938 {
2939         intel_enable_dp(state, encoder, pipe_config, conn_state);
2940         intel_edp_backlight_on(pipe_config, conn_state);
2941 }
2942
2943 static void vlv_enable_dp(struct intel_atomic_state *state,
2944                           struct intel_encoder *encoder,
2945                           const struct intel_crtc_state *pipe_config,
2946                           const struct drm_connector_state *conn_state)
2947 {
2948         intel_edp_backlight_on(pipe_config, conn_state);
2949 }
2950
2951 static void g4x_pre_enable_dp(struct intel_atomic_state *state,
2952                               struct intel_encoder *encoder,
2953                               const struct intel_crtc_state *pipe_config,
2954                               const struct drm_connector_state *conn_state)
2955 {
2956         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2957         enum port port = encoder->port;
2958
2959         intel_dp_prepare(encoder, pipe_config);
2960
2961         /* Only ilk+ has port A */
2962         if (port == PORT_A)
2963                 ilk_edp_pll_on(intel_dp, pipe_config);
2964 }
2965
2966 static void vlv_pre_enable_dp(struct intel_atomic_state *state,
2967                               struct intel_encoder *encoder,
2968                               const struct intel_crtc_state *pipe_config,
2969                               const struct drm_connector_state *conn_state)
2970 {
2971         vlv_phy_pre_encoder_enable(encoder, pipe_config);
2972
2973         intel_enable_dp(state, encoder, pipe_config, conn_state);
2974 }
2975
2976 static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state,
2977                                   struct intel_encoder *encoder,
2978                                   const struct intel_crtc_state *pipe_config,
2979                                   const struct drm_connector_state *conn_state)
2980 {
2981         intel_dp_prepare(encoder, pipe_config);
2982
2983         vlv_phy_pre_pll_enable(encoder, pipe_config);
2984 }
2985
2986 static void chv_pre_enable_dp(struct intel_atomic_state *state,
2987                               struct intel_encoder *encoder,
2988                               const struct intel_crtc_state *pipe_config,
2989                               const struct drm_connector_state *conn_state)
2990 {
2991         chv_phy_pre_encoder_enable(encoder, pipe_config);
2992
2993         intel_enable_dp(state, encoder, pipe_config, conn_state);
2994
2995         /* Second common lane will stay alive on its own now */
2996         chv_phy_release_cl2_override(encoder);
2997 }
2998
2999 static void chv_dp_pre_pll_enable(struct intel_atomic_state *state,
3000                                   struct intel_encoder *encoder,
3001                                   const struct intel_crtc_state *pipe_config,
3002                                   const struct drm_connector_state *conn_state)
3003 {
3004         intel_dp_prepare(encoder, pipe_config);
3005
3006         chv_phy_pre_pll_enable(encoder, pipe_config);
3007 }
3008
3009 static void chv_dp_post_pll_disable(struct intel_atomic_state *state,
3010                                     struct intel_encoder *encoder,
3011                                     const struct intel_crtc_state *old_crtc_state,
3012                                     const struct drm_connector_state *old_conn_state)
3013 {
3014         chv_phy_post_pll_disable(encoder, old_crtc_state);
3015 }
3016
3017 static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp,
3018                                  const struct intel_crtc_state *crtc_state)
3019 {
3020         return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3021 }
3022
3023 static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp,
3024                                  const struct intel_crtc_state *crtc_state)
3025 {
3026         return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3027 }
3028
3029 static u8 intel_dp_preemph_max_2(struct intel_dp *intel_dp)
3030 {
3031         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3032 }
3033
3034 static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp)
3035 {
3036         return DP_TRAIN_PRE_EMPH_LEVEL_3;
3037 }
3038
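/*
 * Translate the requested DP voltage swing / pre-emphasis combination from
 * train_set[0] into VLV PHY de-emphasis, pre-emphasis and unique transition
 * scale register values.
 */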
3039 static void vlv_set_signal_levels(struct intel_dp *intel_dp,
3040                                   const struct intel_crtc_state *crtc_state)
3041 {
3042         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3043         unsigned long demph_reg_value, preemph_reg_value,
3044                 uniqtranscale_reg_value;
3045         u8 train_set = intel_dp->train_set[0];
3046
3047         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3048         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3049                 preemph_reg_value = 0x0004000;
3050                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3051                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3052                         demph_reg_value = 0x2B405555;
3053                         uniqtranscale_reg_value = 0x552AB83A;
3054                         break;
3055                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3056                         demph_reg_value = 0x2B404040;
3057                         uniqtranscale_reg_value = 0x5548B83A;
3058                         break;
3059                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3060                         demph_reg_value = 0x2B245555;
3061                         uniqtranscale_reg_value = 0x5560B83A;
3062                         break;
3063                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3064                         demph_reg_value = 0x2B405555;
3065                         uniqtranscale_reg_value = 0x5598DA3A;
3066                         break;
3067                 default:
3068                         return;
3069                 }
3070                 break;
3071         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3072                 preemph_reg_value = 0x0002000;
3073                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3074                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3075                         demph_reg_value = 0x2B404040;
3076                         uniqtranscale_reg_value = 0x5552B83A;
3077                         break;
3078                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3079                         demph_reg_value = 0x2B404848;
3080                         uniqtranscale_reg_value = 0x5580B83A;
3081                         break;
3082                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3083                         demph_reg_value = 0x2B404040;
3084                         uniqtranscale_reg_value = 0x55ADDA3A;
3085                         break;
3086                 default:
3087                         return;
3088                 }
3089                 break;
3090         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3091                 preemph_reg_value = 0x0000000;
3092                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3093                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3094                         demph_reg_value = 0x2B305555;
3095                         uniqtranscale_reg_value = 0x5570B83A;
3096                         break;
3097                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3098                         demph_reg_value = 0x2B2B4040;
3099                         uniqtranscale_reg_value = 0x55ADDA3A;
3100                         break;
3101                 default:
3102                         return;
3103                 }
3104                 break;
3105         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3106                 preemph_reg_value = 0x0006000;
3107                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3108                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3109                         demph_reg_value = 0x1B405555;
3110                         uniqtranscale_reg_value = 0x55ADDA3A;
3111                         break;
3112                 default:
3113                         return;
3114                 }
3115                 break;
3116         default:
3117                 return;
3118         }
3119
3120         vlv_set_phy_signal_level(encoder, crtc_state,
3121                                  demph_reg_value, preemph_reg_value,
3122                                  uniqtranscale_reg_value, 0);
3123 }
3124
3125 static void chv_set_signal_levels(struct intel_dp *intel_dp,
3126                                   const struct intel_crtc_state *crtc_state)
3127 {
3128         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3129         u32 deemph_reg_value, margin_reg_value;
3130         bool uniq_trans_scale = false;
3131         u8 train_set = intel_dp->train_set[0];
3132
3133         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3134         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3135                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3136                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3137                         deemph_reg_value = 128;
3138                         margin_reg_value = 52;
3139                         break;
3140                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3141                         deemph_reg_value = 128;
3142                         margin_reg_value = 77;
3143                         break;
3144                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3145                         deemph_reg_value = 128;
3146                         margin_reg_value = 102;
3147                         break;
3148                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3149                         deemph_reg_value = 128;
3150                         margin_reg_value = 154;
3151                         uniq_trans_scale = true;
3152                         break;
3153                 default:
3154                         return;
3155                 }
3156                 break;
3157         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3158                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3159                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3160                         deemph_reg_value = 85;
3161                         margin_reg_value = 78;
3162                         break;
3163                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3164                         deemph_reg_value = 85;
3165                         margin_reg_value = 116;
3166                         break;
3167                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3168                         deemph_reg_value = 85;
3169                         margin_reg_value = 154;
3170                         break;
3171                 default:
3172                         return;
3173                 }
3174                 break;
3175         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3176                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3177                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3178                         deemph_reg_value = 64;
3179                         margin_reg_value = 104;
3180                         break;
3181                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3182                         deemph_reg_value = 64;
3183                         margin_reg_value = 154;
3184                         break;
3185                 default:
3186                         return;
3187                 }
3188                 break;
3189         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3190                 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3191                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3192                         deemph_reg_value = 43;
3193                         margin_reg_value = 154;
3194                         break;
3195                 default:
3196                         return;
3197                 }
3198                 break;
3199         default:
3200                 return;
3201         }
3202
3203         chv_set_phy_signal_level(encoder, crtc_state,
3204                                  deemph_reg_value, margin_reg_value,
3205                                  uniq_trans_scale);
3206 }
3207
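/* Map the DP training vswing/pre-emphasis request to g4x DP port register bits */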
3208 static u32 g4x_signal_levels(u8 train_set)
3209 {
3210         u32 signal_levels = 0;
3211
3212         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3213         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3214         default:
3215                 signal_levels |= DP_VOLTAGE_0_4;
3216                 break;
3217         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3218                 signal_levels |= DP_VOLTAGE_0_6;
3219                 break;
3220         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3221                 signal_levels |= DP_VOLTAGE_0_8;
3222                 break;
3223         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3224                 signal_levels |= DP_VOLTAGE_1_2;
3225                 break;
3226         }
3227         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3228         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3229         default:
3230                 signal_levels |= DP_PRE_EMPHASIS_0;
3231                 break;
3232         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3233                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3234                 break;
3235         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3236                 signal_levels |= DP_PRE_EMPHASIS_6;
3237                 break;
3238         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3239                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3240                 break;
3241         }
3242         return signal_levels;
3243 }
3244
3245 static void
3246 g4x_set_signal_levels(struct intel_dp *intel_dp,
3247                       const struct intel_crtc_state *crtc_state)
3248 {
3249         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3250         u8 train_set = intel_dp->train_set[0];
3251         u32 signal_levels;
3252
3253         signal_levels = g4x_signal_levels(train_set);
3254
3255         drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
3256                     signal_levels);
3257
3258         intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK);
3259         intel_dp->DP |= signal_levels;
3260
3261         intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
3262         intel_de_posting_read(dev_priv, intel_dp->output_reg);
3263 }
3264
3265 /* SNB CPU eDP voltage swing and pre-emphasis control */
3266 static u32 snb_cpu_edp_signal_levels(u8 train_set)
3267 {
3268         u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3269                                         DP_TRAIN_PRE_EMPHASIS_MASK);
3270
3271         switch (signal_levels) {
3272         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3273         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3274                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3275         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3276                 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3277         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3278         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3279                 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3280         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3281         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3282                 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3283         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3284         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3285                 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3286         default:
3287                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3288                               "0x%x\n", signal_levels);
3289                 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3290         }
3291 }
3292
3293 static void
3294 snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
3295                               const struct intel_crtc_state *crtc_state)
3296 {
3297         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3298         u8 train_set = intel_dp->train_set[0];
3299         u32 signal_levels;
3300
3301         signal_levels = snb_cpu_edp_signal_levels(train_set);
3302
3303         drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
3304                     signal_levels);
3305
3306         intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3307         intel_dp->DP |= signal_levels;
3308
3309         intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
3310         intel_de_posting_read(dev_priv, intel_dp->output_reg);
3311 }
3312
3313 /* IVB CPU eDP voltage swing and pre-emphasis control */
3314 static u32 ivb_cpu_edp_signal_levels(u8 train_set)
3315 {
3316         u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3317                                         DP_TRAIN_PRE_EMPHASIS_MASK);
3318
3319         switch (signal_levels) {
3320         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3321                 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3322         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3323                 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3324         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3325         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3326                 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3327
3328         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3329                 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3330         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3331                 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3332
3333         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3334                 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3335         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3336                 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3337
3338         default:
3339                 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3340                               "0x%x\n", signal_levels);
3341                 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3342         }
3343 }
3344
3345 static void
3346 ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
3347                               const struct intel_crtc_state *crtc_state)
3348 {
3349         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3350         u8 train_set = intel_dp->train_set[0];
3351         u32 signal_levels;
3352
3353         signal_levels = ivb_cpu_edp_signal_levels(train_set);
3354
3355         drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
3356                     signal_levels);
3357
3358         intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3359         intel_dp->DP |= signal_levels;
3360
3361         intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
3362         intel_de_posting_read(dev_priv, intel_dp->output_reg);
3363 }
3364
3365 static char dp_training_pattern_name(u8 train_pat)
3366 {
3367         switch (train_pat) {
3368         case DP_TRAINING_PATTERN_1:
3369         case DP_TRAINING_PATTERN_2:
3370         case DP_TRAINING_PATTERN_3:
3371                 return '0' + train_pat;
3372         case DP_TRAINING_PATTERN_4:
3373                 return '4';
3374         default:
3375                 MISSING_CASE(train_pat);
3376                 return '?';
3377         }
3378 }
3379
3380 void
3381 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3382                                        const struct intel_crtc_state *crtc_state,
3383                                        u8 dp_train_pat)
3384 {
3385         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3386         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3387         u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);
3388
3389         if (train_pat != DP_TRAINING_PATTERN_DISABLE)
3390                 drm_dbg_kms(&dev_priv->drm,
3391                             "[ENCODER:%d:%s] Using DP training pattern TPS%c\n",
3392                             encoder->base.base.id, encoder->base.name,
3393                             dp_training_pattern_name(train_pat));
3394
3395         intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
3396 }
3397
3398 static void
3399 intel_dp_link_down(struct intel_encoder *encoder,
3400                    const struct intel_crtc_state *old_crtc_state)
3401 {
3402         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3403         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3404         struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
3405         enum port port = encoder->port;
3406         u32 DP = intel_dp->DP;
3407
3408         if (drm_WARN_ON(&dev_priv->drm,
3409                         (intel_de_read(dev_priv, intel_dp->output_reg) &
3410                          DP_PORT_EN) == 0))
3411                 return;
3412
3413         drm_dbg_kms(&dev_priv->drm, "\n");
3414
3415         if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
3416             (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
3417                 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3418                 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3419         } else {
3420                 DP &= ~DP_LINK_TRAIN_MASK;
3421                 DP |= DP_LINK_TRAIN_PAT_IDLE;
3422         }
3423         intel_de_write(dev_priv, intel_dp->output_reg, DP);
3424         intel_de_posting_read(dev_priv, intel_dp->output_reg);
3425
3426         DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3427         intel_de_write(dev_priv, intel_dp->output_reg, DP);
3428         intel_de_posting_read(dev_priv, intel_dp->output_reg);
3429
3430         /*
3431          * HW workaround for IBX, we need to move the port
3432          * to transcoder A after disabling it to allow the
3433          * matching HDMI port to be enabled on transcoder A.
3434          */
3435         if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
3436                 /*
3437                  * We get CPU/PCH FIFO underruns on the other pipe when
3438                  * doing the workaround. Sweep them under the rug.
3439                  */
3440                 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3441                 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3442
3443                 /* always enable with pattern 1 (as per spec) */
3444                 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
3445                 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
3446                         DP_LINK_TRAIN_PAT_1;
3447                 intel_de_write(dev_priv, intel_dp->output_reg, DP);
3448                 intel_de_posting_read(dev_priv, intel_dp->output_reg);
3449
3450                 DP &= ~DP_PORT_EN;
3451                 intel_de_write(dev_priv, intel_dp->output_reg, DP);
3452                 intel_de_posting_read(dev_priv, intel_dp->output_reg);
3453
3454                 intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
3455                 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3456                 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3457         }
3458
3459         msleep(intel_dp->pps.panel_power_down_delay);
3460
3461         intel_dp->DP = DP;
3462
3463         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3464                 intel_wakeref_t wakeref;
3465
3466                 with_intel_pps_lock(intel_dp, wakeref)
3467                         intel_dp->pps.active_pipe = INVALID_PIPE;
3468         }
3469 }
3470
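/*
 * Check DP_DPRX_FEATURE_ENUMERATION_LIST for VSC SDP extension for
 * colorimetry support.
 */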
3471 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
3472 {
3473         u8 dprx = 0;
3474
3475         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
3476                               &dprx) != 1)
3477                 return false;
3478         return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
3479 }
3480
3481 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
3482 {
3483         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3484
3485         /*
3486          * Clear the cached register set to avoid using stale values
3487          * for the sinks that do not support DSC.
3488          */
3489         memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
3490
3491         /* Clear fec_capable to avoid using stale values */
3492         intel_dp->fec_capable = 0;
3493
3494         /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
3495         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
3496             intel_dp->edp_dpcd[0] >= DP_EDP_14) {
3497                 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
3498                                      intel_dp->dsc_dpcd,
3499                                      sizeof(intel_dp->dsc_dpcd)) < 0)
3500                         drm_err(&i915->drm,
3501                                 "Failed to read DPCD register 0x%x\n",
3502                                 DP_DSC_SUPPORT);
3503
3504                 drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n",
3505                             (int)sizeof(intel_dp->dsc_dpcd),
3506                             intel_dp->dsc_dpcd);
3507
3508                 /* FEC is supported only on DP 1.4 */
3509                 if (!intel_dp_is_edp(intel_dp) &&
3510                     drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
3511                                       &intel_dp->fec_capable) < 0)
3512                         drm_err(&i915->drm,
3513                                 "Failed to read FEC DPCD register\n");
3514
3515                 drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
3516                             intel_dp->fec_capable);
3517         }
3518 }
3519
3520 static bool
3521 intel_edp_init_dpcd(struct intel_dp *intel_dp)
3522 {
3523         struct drm_i915_private *dev_priv =
3524                 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
3525
3526         /* this function is meant to be called only once */
3527         drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);
3528
3529         if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
3530                 return false;
3531
3532         drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
3533                          drm_dp_is_branch(intel_dp->dpcd));
3534
3535         /*
3536          * Read the eDP display control registers.
3537          *
3538          * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
3539          * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
3540          * set, but require eDP 1.4+ detection (e.g. for supported link rates
3541          * method). The display control registers should read zero if they're
3542          * not supported anyway.
3543          */
3544         if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
3545                              intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
3546                              sizeof(intel_dp->edp_dpcd))
3547                 drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
3548                             (int)sizeof(intel_dp->edp_dpcd),
3549                             intel_dp->edp_dpcd);
3550
3551         /*
3552          * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
3553          * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
3554          */
3555         intel_psr_init_dpcd(intel_dp);
3556
3557         /* Read the eDP 1.4+ supported link rates. */
3558         if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
3559                 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3560                 int i;
3561
3562                 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
3563                                 sink_rates, sizeof(sink_rates));
3564
3565                 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3566                         int val = le16_to_cpu(sink_rates[i]);
3567
3568                         if (val == 0)
3569                                 break;
3570
3571                         /* Value read multiplied by 200kHz gives the per-lane
3572                          * link rate in kHz. The source rates are, however,
3573                          * stored in terms of LS_Clk kHz. The full conversion
3574                          * back to symbols is
3575                          * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
3576                          */
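                        /*
                         * e.g. a DPCD entry of 27000 (0x6978) is 27000 * 200 kHz
                         * = 5.4 GHz per lane (HBR2), stored here as
                         * 27000 * 200 / 10 = 540000.
                         */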
3577                         intel_dp->sink_rates[i] = (val * 200) / 10;
3578                 }
3579                 intel_dp->num_sink_rates = i;
3580         }
3581
3582         /*
3583          * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
3584          * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
3585          */
3586         if (intel_dp->num_sink_rates)
3587                 intel_dp->use_rate_select = true;
3588         else
3589                 intel_dp_set_sink_rates(intel_dp);
3590
3591         intel_dp_set_common_rates(intel_dp);
3592
3593         /* Read the eDP DSC DPCD registers */
3594         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
3595                 intel_dp_get_dsc_sink_cap(intel_dp);
3596
3597         /*
3598          * If needed, program our source OUI so we can make various Intel-specific AUX services
3599          * available (such as HDR backlight controls)
3600          */
3601         intel_edp_init_source_oui(intel_dp, true);
3602
3603         return true;
3604 }
3605
3606 static bool
3607 intel_dp_has_sink_count(struct intel_dp *intel_dp)
3608 {
3609         if (!intel_dp->attached_connector)
3610                 return false;
3611
3612         return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
3613                                           intel_dp->dpcd,
3614                                           &intel_dp->desc);
3615 }
3616
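/*
 * Re-read the sink's DPCD on (re)detection: initialize LTTPRs, refresh the
 * receiver caps and sink rates (non-eDP only), then read the sink count and
 * downstream port information.
 */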
3617 static bool
3618 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3619 {
3620         int ret;
3621
3622         intel_dp_lttpr_init(intel_dp);
3623
3624         if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd))
3625                 return false;
3626
3627         /*
3628          * Don't clobber cached eDP rates. Also skip re-reading
3629          * the OUI/ID since we know it won't change.
3630          */
3631         if (!intel_dp_is_edp(intel_dp)) {
3632                 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
3633                                  drm_dp_is_branch(intel_dp->dpcd));
3634
3635                 intel_dp_set_sink_rates(intel_dp);
3636                 intel_dp_set_common_rates(intel_dp);
3637         }
3638
3639         if (intel_dp_has_sink_count(intel_dp)) {
3640                 ret = drm_dp_read_sink_count(&intel_dp->aux);
3641                 if (ret < 0)
3642                         return false;
3643
3644                 /*
3645                  * Sink count can change between short pulse HPD interrupts,
3646                  * hence a member variable in intel_dp tracks any changes
3647                  * between them.
3648                  */
3649                 intel_dp->sink_count = ret;
3650
3651                 /*
3652                  * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
3653                  * a dongle is present but no display. Unless we need to know
3654                  * whether a dongle is present, we don't have to update the
3655                  * downstream port information, so an early return here avoids
3656                  * unnecessary work.
3657                  */
3658                 if (!intel_dp->sink_count)
3659                         return false;
3660         }
3661
3662         return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd,
3663                                            intel_dp->downstream_ports) == 0;
3664 }
3665
3666 static bool
3667 intel_dp_can_mst(struct intel_dp *intel_dp)
3668 {
3669         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3670
3671         return i915->params.enable_dp_mst &&
3672                 intel_dp->can_mst &&
3673                 drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
3674 }
3675
3676 static void
3677 intel_dp_configure_mst(struct intel_dp *intel_dp)
3678 {
3679         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3680         struct intel_encoder *encoder =
3681                 &dp_to_dig_port(intel_dp)->base;
3682         bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
3683
3684         drm_dbg_kms(&i915->drm,
3685                     "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
3686                     encoder->base.base.id, encoder->base.name,
3687                     yesno(intel_dp->can_mst), yesno(sink_can_mst),
3688                     yesno(i915->params.enable_dp_mst));
3689
3690         if (!intel_dp->can_mst)
3691                 return;
3692
3693         intel_dp->is_mst = sink_can_mst &&
3694                 i915->params.enable_dp_mst;
3695
3696         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
3697                                         intel_dp->is_mst);
3698 }
3699
3700 static bool
3701 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3702 {
3703         return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
3704                                 sink_irq_vector, DP_DPRX_ESI_LEN) ==
3705                 DP_DPRX_ESI_LEN;
3706 }
3707
3708 bool
3709 intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
3710                        const struct drm_connector_state *conn_state)
3711 {
3712         /*
3713          * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
3714          * of Color Encoding Format and Content Color Gamut], in order to
3715          * send YCbCr 4:2:0 or HDR BT.2020 signals we should use the DP VSC SDP.
3716          */
3717         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
3718                 return true;
3719
3720         switch (conn_state->colorspace) {
3721         case DRM_MODE_COLORIMETRY_SYCC_601:
3722         case DRM_MODE_COLORIMETRY_OPYCC_601:
3723         case DRM_MODE_COLORIMETRY_BT2020_YCC:
3724         case DRM_MODE_COLORIMETRY_BT2020_RGB:
3725         case DRM_MODE_COLORIMETRY_BT2020_CYCC:
3726                 return true;
3727         default:
3728                 break;
3729         }
3730
3731         return false;
3732 }
3733
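/*
 * Pack a drm_dp_vsc_sdp into a raw dp_sdp buffer. Returns sizeof(struct dp_sdp)
 * on success, or -ENOSPC if the buffer is too small.
 */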
3734 static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
3735                                      struct dp_sdp *sdp, size_t size)
3736 {
3737         size_t length = sizeof(struct dp_sdp);
3738
3739         if (size < length)
3740                 return -ENOSPC;
3741
3742         memset(sdp, 0, size);
3743
3744         /*
3745          * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
3746          * VSC SDP Header Bytes
3747          */
3748         sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
3749         sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
3750         sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
3751         sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */
3752
3753         /*
3754          * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
3755          * per DP 1.4a spec.
3756          */
3757         if (vsc->revision != 0x5)
3758                 goto out;
3759
3760         /* VSC SDP Payload for DB16 through DB18 */
3761         /* Pixel Encoding and Colorimetry Formats  */
3762         sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
3763         sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */
3764
3765         switch (vsc->bpc) {
3766         case 6:
3767                 /* 6bpc: 0x0 */
3768                 break;
3769         case 8:
3770                 sdp->db[17] = 0x1; /* DB17[3:0] */
3771                 break;
3772         case 10:
3773                 sdp->db[17] = 0x2;
3774                 break;
3775         case 12:
3776                 sdp->db[17] = 0x3;
3777                 break;
3778         case 16:
3779                 sdp->db[17] = 0x4;
3780                 break;
3781         default:
3782                 MISSING_CASE(vsc->bpc);
3783                 break;
3784         }
3785         /* Dynamic Range and Component Bit Depth */
3786         if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
3787                 sdp->db[17] |= 0x80;  /* DB17[7] */
3788
3789         /* Content Type */
3790         sdp->db[18] = vsc->content_type & 0x7;
3791
3792 out:
3793         return length;
3794 }
3795
3796 static ssize_t
3797 intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
3798                                          struct dp_sdp *sdp,
3799                                          size_t size)
3800 {
3801         size_t length = sizeof(struct dp_sdp);
3802         const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
3803         unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
3804         ssize_t len;
3805
3806         if (size < length)
3807                 return -ENOSPC;
3808
3809         memset(sdp, 0, size);
3810
3811         len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
3812         if (len < 0) {
3813                 DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
3814                 return -ENOSPC;
3815         }
3816
3817         if (len != infoframe_size) {
3818                 DRM_DEBUG_KMS("wrong static hdr metadata size\n");
3819                 return -ENOSPC;
3820         }
3821
3822         /*
3823          * Set up the infoframe sdp packet for HDR static metadata.
3824          * Prepare the SDP header as per DP 1.4a spec,
3825          * Table 2-100 and Table 2-101.
3826          */
3827
3828         /* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
3829         sdp->sdp_header.HB0 = 0;
3830         /*
3831          * Packet Type 80h + Non-audio INFOFRAME Type value
3832          * HDMI_INFOFRAME_TYPE_DRM: 0x87
3833          * - 80h + Non-audio INFOFRAME Type value
3834          * - InfoFrame Type: 0x07
3835          *    [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
3836          */
3837         sdp->sdp_header.HB1 = drm_infoframe->type;
3838         /*
3839          * Least Significant Eight Bits of (Data Byte Count – 1)
3840          * infoframe_size - 1
3841          */
3842         sdp->sdp_header.HB2 = 0x1D;
3843         /* INFOFRAME SDP Version Number */
3844         sdp->sdp_header.HB3 = (0x13 << 2);
3845         /* CTA Header Byte 2 (INFOFRAME Version Number) */
3846         sdp->db[0] = drm_infoframe->version;
3847         /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
3848         sdp->db[1] = drm_infoframe->length;
3849         /*
3850          * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
3851          * HDMI_INFOFRAME_HEADER_SIZE
3852          */
3853         BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
3854         memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
3855                HDMI_DRM_INFOFRAME_SIZE);
3856
3857         /*
3858          * Size of DP infoframe sdp packet for HDR static metadata consists of
3859          * - DP SDP Header(struct dp_sdp_header): 4 bytes
3860          * - Two Data Blocks: 2 bytes
3861          *    CTA Header Byte2 (INFOFRAME Version Number)
3862          *    CTA Header Byte3 (Length of INFOFRAME)
3863          * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
3864          *
3865          * Prior to GEN11, the GMP register size is identical to the DP HDR static
3866          * metadata infoframe size. GEN11+ has a larger GMP register, and
3867          * write_infoframe() will pad the remainder.
3868          */
3869         return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
3870 }
3871
3872 static void intel_write_dp_sdp(struct intel_encoder *encoder,
3873                                const struct intel_crtc_state *crtc_state,
3874                                unsigned int type)
3875 {
3876         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
3877         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3878         struct dp_sdp sdp = {};
3879         ssize_t len;
3880
3881         if ((crtc_state->infoframes.enable &
3882              intel_hdmi_infoframe_enable(type)) == 0)
3883                 return;
3884
3885         switch (type) {
3886         case DP_SDP_VSC:
3887                 len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
3888                                             sizeof(sdp));
3889                 break;
3890         case HDMI_PACKET_TYPE_GAMUT_METADATA:
3891                 len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm,
3892                                                                &sdp, sizeof(sdp));
3893                 break;
3894         default:
3895                 MISSING_CASE(type);
3896                 return;
3897         }
3898
3899         if (drm_WARN_ON(&dev_priv->drm, len < 0))
3900                 return;
3901
3902         dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
3903 }
3904
3905 void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
3906                             const struct intel_crtc_state *crtc_state,
3907                             struct drm_dp_vsc_sdp *vsc)
3908 {
3909         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
3910         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3911         struct dp_sdp sdp = {};
3912         ssize_t len;
3913
3914         len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));
3915
3916         if (drm_WARN_ON(&dev_priv->drm, len < 0))
3917                 return;
3918
3919         dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
3920                                         &sdp, len);
3921 }
3922
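/*
 * Enable or disable the DP SDPs (VSC and HDR gamut metadata) for the given
 * transcoder. When PSR is enabled the VSC SDP is left to the PSR code.
 */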
3923 void intel_dp_set_infoframes(struct intel_encoder *encoder,
3924                              bool enable,
3925                              const struct intel_crtc_state *crtc_state,
3926                              const struct drm_connector_state *conn_state)
3927 {
3928         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3929         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3930         i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
3931         u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
3932                          VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
3933                          VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
3934         u32 val = intel_de_read(dev_priv, reg);
3935
3936         /* TODO: Add DSC case (DIP_ENABLE_PPS) */
3937         /* When PSR is enabled, this routine doesn't disable VSC DIP */
3938         if (intel_psr_enabled(intel_dp))
3939                 val &= ~dip_enable;
3940         else
3941                 val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW);
3942
3943         if (!enable) {
3944                 intel_de_write(dev_priv, reg, val);
3945                 intel_de_posting_read(dev_priv, reg);
3946                 return;
3947         }
3948
3949         intel_de_write(dev_priv, reg, val);
3950         intel_de_posting_read(dev_priv, reg);
3951
3952         /* When PSR is enabled, VSC SDP is handled by PSR routine */
3953         if (!intel_psr_enabled(intel_dp))
3954                 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
3955
3956         intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
3957 }
3958
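/*
 * Unpack a VSC SDP from the raw SDP buffer into a struct drm_dp_vsc_sdp.
 * Only the header revisions handled below are accepted: HB2/HB3 = 0x2/0x8
 * (3D stereo + PSR), 0x4/0xe (3D stereo + PSR2 with SU Y-coordinate) and
 * 0x5/0x13 (Pixel Encoding/Colorimetry Format); anything else returns
 * -EINVAL.
 */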
3959 static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
3960                                    const void *buffer, size_t size)
3961 {
3962         const struct dp_sdp *sdp = buffer;
3963
3964         if (size < sizeof(struct dp_sdp))
3965                 return -EINVAL;
3966
3967                 memset(vsc, 0, sizeof(*vsc));
3968
3969         if (sdp->sdp_header.HB0 != 0)
3970                 return -EINVAL;
3971
3972         if (sdp->sdp_header.HB1 != DP_SDP_VSC)
3973                 return -EINVAL;
3974
3975         vsc->sdp_type = sdp->sdp_header.HB1;
3976         vsc->revision = sdp->sdp_header.HB2;
3977         vsc->length = sdp->sdp_header.HB3;
3978
3979         if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
3980             (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
3981                 /*
3982                  * - HB2 = 0x2, HB3 = 0x8
3983                  *   VSC SDP supporting 3D stereo + PSR
3984                  * - HB2 = 0x4, HB3 = 0xe
3985                  *   VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
3986                  *   first scan line of the SU region (applies to eDP v1.4b
3987                  *   and higher).
3988                  */
3989                 return 0;
3990         } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
3991                 /*
3992                  * - HB2 = 0x5, HB3 = 0x13
3993                  *   VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
3994                  *   Format.
3995                  */
3996                 vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
3997                 vsc->colorimetry = sdp->db[16] & 0xf;
3998                 vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;
3999
4000                 switch (sdp->db[17] & 0x7) {
4001                 case 0x0:
4002                         vsc->bpc = 6;
4003                         break;
4004                 case 0x1:
4005                         vsc->bpc = 8;
4006                         break;
4007                 case 0x2:
4008                         vsc->bpc = 10;
4009                         break;
4010                 case 0x3:
4011                         vsc->bpc = 12;
4012                         break;
4013                 case 0x4:
4014                         vsc->bpc = 16;
4015                         break;
4016                 default:
4017                         MISSING_CASE(sdp->db[17] & 0x7);
4018                         return -EINVAL;
4019                 }
4020
4021                 vsc->content_type = sdp->db[18] & 0x7;
4022         } else {
4023                 return -EINVAL;
4024         }
4025
4026         return 0;
4027 }
4028
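/*
 * Unpack an HDR Dynamic Range and Mastering (DRM) infoframe carried in a
 * DP SDP: validate the SDP header and the CTA header bytes, then hand the
 * payload to hdmi_drm_infoframe_unpack_only().
 */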
4029 static int
4030 intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
4031                                            const void *buffer, size_t size)
4032 {
4033         int ret;
4034
4035         const struct dp_sdp *sdp = buffer;
4036
4037         if (size < sizeof(struct dp_sdp))
4038                 return -EINVAL;
4039
4040         if (sdp->sdp_header.HB0 != 0)
4041                 return -EINVAL;
4042
4043         if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
4044                 return -EINVAL;
4045
4046         /*
4047          * Least Significant Eight Bits of (Data Byte Count – 1)
4048          * 1Dh (i.e., Data Byte Count = 30 bytes).
4049          */
4050         if (sdp->sdp_header.HB2 != 0x1D)
4051                 return -EINVAL;
4052
4053         /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */
4054         if ((sdp->sdp_header.HB3 & 0x3) != 0)
4055                 return -EINVAL;
4056
4057         /* INFOFRAME SDP Version Number */
4058         if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
4059                 return -EINVAL;
4060
4061         /* CTA Header Byte 2 (INFOFRAME Version Number) */
4062         if (sdp->db[0] != 1)
4063                 return -EINVAL;
4064
4065         /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
4066         if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
4067                 return -EINVAL;
4068
4069         ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
4070                                              HDMI_DRM_INFOFRAME_SIZE);
4071
4072         return ret;
4073 }
4074
4075 static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
4076                                   struct intel_crtc_state *crtc_state,
4077                                   struct drm_dp_vsc_sdp *vsc)
4078 {
4079         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
4080         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4081         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4082         unsigned int type = DP_SDP_VSC;
4083         struct dp_sdp sdp = {};
4084         int ret;
4085
4086         /* When PSR is enabled, VSC SDP is handled by PSR routine */
4087         if (intel_psr_enabled(intel_dp))
4088                 return;
4089
4090         if ((crtc_state->infoframes.enable &
4091              intel_hdmi_infoframe_enable(type)) == 0)
4092                 return;
4093
4094         dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
4095
4096         ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
4097
4098         if (ret)
4099                 drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
4100 }
4101
4102 static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
4103                                                      struct intel_crtc_state *crtc_state,
4104                                                      struct hdmi_drm_infoframe *drm_infoframe)
4105 {
4106         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
4107         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4108         unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
4109         struct dp_sdp sdp = {};
4110         int ret;
4111
4112         if ((crtc_state->infoframes.enable &
4113             intel_hdmi_infoframe_enable(type)) == 0)
4114                 return;
4115
4116         dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
4117                                  sizeof(sdp));
4118
4119         ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
4120                                                          sizeof(sdp));
4121
4122         if (ret)
4123                 drm_dbg_kms(&dev_priv->drm,
4124                             "Failed to unpack DP HDR Metadata Infoframe SDP\n");
4125 }
4126
4127 void intel_read_dp_sdp(struct intel_encoder *encoder,
4128                        struct intel_crtc_state *crtc_state,
4129                        unsigned int type)
4130 {
4131         if (encoder->type != INTEL_OUTPUT_DDI)
4132                 return;
4133
4134         switch (type) {
4135         case DP_SDP_VSC:
4136                 intel_read_dp_vsc_sdp(encoder, crtc_state,
4137                                       &crtc_state->infoframes.vsc);
4138                 break;
4139         case HDMI_PACKET_TYPE_GAMUT_METADATA:
4140                 intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
4141                                                          &crtc_state->infoframes.drm.drm);
4142                 break;
4143         default:
4144                 MISSING_CASE(type);
4145                 break;
4146         }
4147 }
4148
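/*
 * DP CTS link training autotest: read the lane count and link rate the test
 * equipment requested via DPCD, validate them against what this source
 * supports and stash them in intel_dp->compliance.
 */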
4149 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4150 {
4151         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4152         int status = 0;
4153         int test_link_rate;
4154         u8 test_lane_count, test_link_bw;
4155         /* (DP CTS 1.2)
4156          * 4.3.1.11
4157          */
4158         /* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
4159         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
4160                                    &test_lane_count);
4161
4162         if (status <= 0) {
4163                 drm_dbg_kms(&i915->drm, "Lane count read failed\n");
4164                 return DP_TEST_NAK;
4165         }
4166         test_lane_count &= DP_MAX_LANE_COUNT_MASK;
4167
4168         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
4169                                    &test_link_bw);
4170         if (status <= 0) {
4171                 drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
4172                 return DP_TEST_NAK;
4173         }
4174         test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
4175
4176         /* Validate the requested link rate and lane count */
4177         if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
4178                                         test_lane_count))
4179                 return DP_TEST_NAK;
4180
4181         intel_dp->compliance.test_lane_count = test_lane_count;
4182         intel_dp->compliance.test_link_rate = test_link_rate;
4183
4184         return DP_TEST_ACK;
4185 }
4186
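/*
 * DP CTS video pattern autotest: only the color ramp pattern with RGB,
 * VESA range and 6 or 8 bpc is accepted. The requested pattern, resolution
 * and bpc are stored in intel_dp->compliance.test_data.
 */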
4187 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4188 {
4189         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4190         u8 test_pattern;
4191         u8 test_misc;
4192         __be16 h_width, v_height;
4193         int status = 0;
4194
4195         /* Read the TEST_PATTERN (DP CTS 3.1.5) */
4196         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
4197                                    &test_pattern);
4198         if (status <= 0) {
4199                 drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
4200                 return DP_TEST_NAK;
4201         }
4202         if (test_pattern != DP_COLOR_RAMP)
4203                 return DP_TEST_NAK;
4204
4205         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
4206                                   &h_width, 2);
4207         if (status <= 0) {
4208                 drm_dbg_kms(&i915->drm, "H Width read failed\n");
4209                 return DP_TEST_NAK;
4210         }
4211
4212         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
4213                                   &v_height, 2);
4214         if (status <= 0) {
4215                 drm_dbg_kms(&i915->drm, "V Height read failed\n");
4216                 return DP_TEST_NAK;
4217         }
4218
4219         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
4220                                    &test_misc);
4221         if (status <= 0) {
4222                 drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
4223                 return DP_TEST_NAK;
4224         }
4225         if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
4226                 return DP_TEST_NAK;
4227         if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
4228                 return DP_TEST_NAK;
4229         switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
4230         case DP_TEST_BIT_DEPTH_6:
4231                 intel_dp->compliance.test_data.bpc = 6;
4232                 break;
4233         case DP_TEST_BIT_DEPTH_8:
4234                 intel_dp->compliance.test_data.bpc = 8;
4235                 break;
4236         default:
4237                 return DP_TEST_NAK;
4238         }
4239
4240         intel_dp->compliance.test_data.video_pattern = test_pattern;
4241         intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
4242         intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
4243         /* Set test active flag here so userspace doesn't interrupt things */
4244         intel_dp->compliance.test_active = true;
4245
4246         return DP_TEST_ACK;
4247 }
4248
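/*
 * DP CTS EDID read autotest: on a failed or corrupt EDID read fall back to
 * the failsafe resolution, otherwise write the checksum of the last EDID
 * block to DP_TEST_EDID_CHECKSUM and report the preferred resolution.
 */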
4249 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
4250 {
4251         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4252         u8 test_result = DP_TEST_ACK;
4253         struct intel_connector *intel_connector = intel_dp->attached_connector;
4254         struct drm_connector *connector = &intel_connector->base;
4255
4256         if (intel_connector->detect_edid == NULL ||
4257             connector->edid_corrupt ||
4258             intel_dp->aux.i2c_defer_count > 6) {
4259                 /* Check EDID read for NACKs, DEFERs and corruption
4260                  * (DP CTS 1.2 Core r1.1)
4261                  *    4.2.2.4 : Failed EDID read, I2C_NAK
4262                  *    4.2.2.5 : Failed EDID read, I2C_DEFER
4263                  *    4.2.2.6 : EDID corruption detected
4264                  * Use failsafe mode for all cases
4265                  */
4266                 if (intel_dp->aux.i2c_nack_count > 0 ||
4267                         intel_dp->aux.i2c_defer_count > 0)
4268                         drm_dbg_kms(&i915->drm,
4269                                     "EDID read had %d NACKs, %d DEFERs\n",
4270                                     intel_dp->aux.i2c_nack_count,
4271                                     intel_dp->aux.i2c_defer_count);
4272                 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
4273         } else {
4274                 struct edid *block = intel_connector->detect_edid;
4275
4276                 /* We have to write the checksum
4277                  * of the last block read
4278                  */
4279                 block += intel_connector->detect_edid->extensions;
4280
4281                 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
4282                                        block->checksum) <= 0)
4283                         drm_dbg_kms(&i915->drm,
4284                                     "Failed to write EDID checksum\n");
4285
4286                 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4287                 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
4288         }
4289
4290         /* Set test active flag here so userspace doesn't interrupt things */
4291         intel_dp->compliance.test_active = true;
4292
4293         return test_result;
4294 }
4295
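/*
 * Program the DDI compliance pattern generator for the requested PHY test
 * pattern (none, D10.2, scrambled 0 error count, PRBS7, 80-bit custom or
 * HBR2/CP2520).
 */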
4296 static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
4297                                         const struct intel_crtc_state *crtc_state)
4298 {
4299         struct drm_i915_private *dev_priv =
4300                         to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
4301         struct drm_dp_phy_test_params *data =
4302                         &intel_dp->compliance.test_data.phytest;
4303         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4304         enum pipe pipe = crtc->pipe;
4305         u32 pattern_val;
4306
4307         switch (data->phy_pattern) {
4308         case DP_PHY_TEST_PATTERN_NONE:
4309                 DRM_DEBUG_KMS("Disable Phy Test Pattern\n");
4310                 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
4311                 break;
4312         case DP_PHY_TEST_PATTERN_D10_2:
4313                 DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n");
4314                 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
4315                                DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
4316                 break;
4317         case DP_PHY_TEST_PATTERN_ERROR_COUNT:
4318                 DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n");
4319                 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
4320                                DDI_DP_COMP_CTL_ENABLE |
4321                                DDI_DP_COMP_CTL_SCRAMBLED_0);
4322                 break;
4323         case DP_PHY_TEST_PATTERN_PRBS7:
4324                 DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n");
4325                 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
4326                                DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
4327                 break;
4328         case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
4329                 /*
4330                  * FIXME: Ideally the pattern should come from DPCD 0x250. Since
4331                  * current DPR-100 firmware cannot set it, hardcode it for now
4332                  * for the compliance test.
4333                  */
4334                 DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
4335                 pattern_val = 0x3e0f83e0;
4336                 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
4337                 pattern_val = 0x0f83e0f8;
4338                 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
4339                 pattern_val = 0x0000f83e;
4340                 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
4341                 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
4342                                DDI_DP_COMP_CTL_ENABLE |
4343                                DDI_DP_COMP_CTL_CUSTOM80);
4344                 break;
4345         case DP_PHY_TEST_PATTERN_CP2520:
4346                 /*
4347                  * FIXME: Ideally the pattern should come from DPCD 0x24A. Since
4348                  * current DPR-100 firmware cannot set it, hardcode it for now
4349                  * for the compliance test.
4350                  */
4351                 DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n");
4352                 pattern_val = 0xFB;
4353                 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
4354                                DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
4355                                pattern_val);
4356                 break;
4357         default:
4358                 WARN(1, "Invalid Phy Test Pattern\n");
4359         }
4360 }
4361
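/*
 * Temporarily disable the transcoder, pipe and DP_TP so that the signal
 * levels and PHY test pattern can be reprogrammed; the matching
 * intel_dp_autotest_phy_ddi_enable() turns them back on. Both are called
 * from intel_dp_process_phy_request().
 */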
4362 static void
4363 intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp,
4364                                   const struct intel_crtc_state *crtc_state)
4365 {
4366         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4367         struct drm_device *dev = dig_port->base.base.dev;
4368         struct drm_i915_private *dev_priv = to_i915(dev);
4369         struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
4370         enum pipe pipe = crtc->pipe;
4371         u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
4372
4373         trans_ddi_func_ctl_value = intel_de_read(dev_priv,
4374                                                  TRANS_DDI_FUNC_CTL(pipe));
4375         trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
4376         dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
4377
4378         trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
4379                                       TGL_TRANS_DDI_PORT_MASK);
4380         trans_conf_value &= ~PIPECONF_ENABLE;
4381         dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;
4382
4383         intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
4384         intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
4385                        trans_ddi_func_ctl_value);
4386         intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
4387 }
4388
4389 static void
4390 intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp,
4391                                  const struct intel_crtc_state *crtc_state)
4392 {
4393         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4394         struct drm_device *dev = dig_port->base.base.dev;
4395         struct drm_i915_private *dev_priv = to_i915(dev);
4396         enum port port = dig_port->base.port;
4397         struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
4398         enum pipe pipe = crtc->pipe;
4399         u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;
4400
4401         trans_ddi_func_ctl_value = intel_de_read(dev_priv,
4402                                                  TRANS_DDI_FUNC_CTL(pipe));
4403         trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
4404         dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));
4405
4406         trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
4407                                     TGL_TRANS_DDI_SELECT_PORT(port);
4408         trans_conf_value |= PIPECONF_ENABLE;
4409         dp_tp_ctl_value |= DP_TP_CTL_ENABLE;
4410
4411         intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
4412         intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
4413         intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
4414                        trans_ddi_func_ctl_value);
4415 }
4416
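/*
 * Service a PHY test pattern request: pick up the vswing/pre-emphasis
 * adjustments requested by the test equipment, disable the DDI, program
 * the signal levels and the pattern generator, re-enable the DDI and
 * finally mirror the pattern selection to the sink via DPCD.
 */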
4417 static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
4418                                          const struct intel_crtc_state *crtc_state)
4419 {
4420         struct drm_dp_phy_test_params *data =
4421                 &intel_dp->compliance.test_data.phytest;
4422         u8 link_status[DP_LINK_STATUS_SIZE];
4423
4424         if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
4425                                              link_status) < 0) {
4426                 DRM_DEBUG_KMS("failed to get link status\n");
4427                 return;
4428         }
4429
4430         /* retrieve vswing & pre-emphasis setting */
4431         intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX,
4432                                   link_status);
4433
4434         intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);
4435
4436         intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);
4437
4438         intel_dp_phy_pattern_update(intel_dp, crtc_state);
4439
4440         intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state);
4441
4442         drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
4443                                     link_status[DP_DPCD_REV]);
4444 }
4445
4446 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4447 {
4448         struct drm_dp_phy_test_params *data =
4449                 &intel_dp->compliance.test_data.phytest;
4450
4451         if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
4452                 DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n");
4453                 return DP_TEST_NAK;
4454         }
4455
4456         /* Set test active flag here so userspace doesn't interrupt things */
4457         intel_dp->compliance.test_active = true;
4458
4459         return DP_TEST_ACK;
4460 }
4461
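/*
 * Dispatch an automated test request read from DP_TEST_REQUEST to the
 * matching autotest handler and write the ACK/NAK result back to
 * DP_TEST_RESPONSE.
 */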
4462 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4463 {
4464         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4465         u8 response = DP_TEST_NAK;
4466         u8 request = 0;
4467         int status;
4468
4469         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
4470         if (status <= 0) {
4471                 drm_dbg_kms(&i915->drm,
4472                             "Could not read test request from sink\n");
4473                 goto update_status;
4474         }
4475
4476         switch (request) {
4477         case DP_TEST_LINK_TRAINING:
4478                 drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
4479                 response = intel_dp_autotest_link_training(intel_dp);
4480                 break;
4481         case DP_TEST_LINK_VIDEO_PATTERN:
4482                 drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
4483                 response = intel_dp_autotest_video_pattern(intel_dp);
4484                 break;
4485         case DP_TEST_LINK_EDID_READ:
4486                 drm_dbg_kms(&i915->drm, "EDID test requested\n");
4487                 response = intel_dp_autotest_edid(intel_dp);
4488                 break;
4489         case DP_TEST_LINK_PHY_TEST_PATTERN:
4490                 drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
4491                 response = intel_dp_autotest_phy_pattern(intel_dp);
4492                 break;
4493         default:
4494                 drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
4495                             request);
4496                 break;
4497         }
4498
4499         if (response & DP_TEST_ACK)
4500                 intel_dp->compliance.test_type = request;
4501
4502 update_status:
4503         status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
4504         if (status <= 0)
4505                 drm_dbg_kms(&i915->drm,
4506                             "Could not write test response to sink\n");
4507 }
4508
4509 static void
4510 intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, bool *handled)
4511 {
4512         drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, handled);
4513
4514         if (esi[1] & DP_CP_IRQ) {
4515                 intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
4516                 *handled = true;
4517         }
4518 }
4519
4520 /**
4521  * intel_dp_check_mst_status - service any pending MST interrupts, check link status
4522  * @intel_dp: Intel DP struct
4523  *
4524  * Read any pending MST interrupts, call MST core to handle these and ack the
4525  * interrupts. Check if the main and AUX link state is ok.
4526  *
4527  * Returns:
4528  * - %true if pending interrupts were serviced (or no interrupts were
4529  *   pending) w/o detecting an error condition.
4530  * - %false if an error condition - like AUX failure or a loss of link - is
4531  *   detected, which needs servicing from the hotplug work.
4532  */
4533 static bool
4534 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4535 {
4536         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4537         bool link_ok = true;
4538
4539         drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
4540
4541         for (;;) {
4542                 u8 esi[DP_DPRX_ESI_LEN] = {};
4543                 bool handled;
4544                 int retry;
4545
4546                 if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
4547                         drm_dbg_kms(&i915->drm,
4548                                     "failed to get ESI - device may have failed\n");
4549                         link_ok = false;
4550
4551                         break;
4552                 }
4553
4554                 /* check link status - esi[10] = 0x200c */
4555                 if (intel_dp->active_mst_links > 0 && link_ok &&
4556                     !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4557                         drm_dbg_kms(&i915->drm,
4558                                     "channel EQ not ok, retraining\n");
4559                         link_ok = false;
4560                 }
4561
4562                 drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);
4563
4564                 intel_dp_mst_hpd_irq(intel_dp, esi, &handled);
4565
4566                 if (!handled)
4567                         break;
4568
4569                 for (retry = 0; retry < 3; retry++) {
4570                         int wret;
4571
4572                         wret = drm_dp_dpcd_write(&intel_dp->aux,
4573                                                  DP_SINK_COUNT_ESI+1,
4574                                                  &esi[1], 3);
4575                         if (wret == 3)
4576                                 break;
4577                 }
4578         }
4579
4580         return link_ok;
4581 }
4582
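/*
 * If the PCON reports that a previously trained HDMI FRL link has gone
 * down, disable the HDMI link in the PCON, dump the FRL error counters
 * and restart FRL training (or fall back to TMDS mode).
 */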
4583 static void
4584 intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
4585 {
4586         bool is_active;
4587         u8 buf = 0;
4588
4589         is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux);
4590         if (intel_dp->frl.is_trained && !is_active) {
4591                 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0)
4592                         return;
4593
4594                 buf &= ~DP_PCON_ENABLE_HDMI_LINK;
4595                 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0)
4596                         return;
4597
4598                 drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);
4599
4600                 /* Restart FRL training or fall back to TMDS mode */
4601                 intel_dp_check_frl_training(intel_dp);
4602         }
4603 }
4604
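/*
 * Check whether an already trained link has lost CR/channel EQ and needs
 * retraining. Returns false while PSR owns the link or when the cached
 * link parameters are no longer valid.
 */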
4605 static bool
4606 intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
4607 {
4608         u8 link_status[DP_LINK_STATUS_SIZE];
4609
4610         if (!intel_dp->link_trained)
4611                 return false;
4612
4613         /*
4614          * While the PSR source HW is enabled it controls the main link,
4615          * enabling and disabling frame transmission, so attempting a
4616          * retrain here would fail: the link might not be on, or training
4617          * patterns could get mixed with frame data, either way causing the
4618          * retrain to fail. Also, when exiting PSR the HW retrains the link
4619          * anyway, fixing any link status errors.
4620          */
4621         if (intel_psr_enabled(intel_dp))
4622                 return false;
4623
4624         if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
4625                                              link_status) < 0)
4626                 return false;
4627
4628         /*
4629          * Validate the cached values of intel_dp->link_rate and
4630          * intel_dp->lane_count before attempting to retrain.
4631          *
4632          * FIXME would be nice to use the crtc state here, but since
4633          * we need to call this from the short HPD handler that seems
4634          * a bit hard.
4635          */
4636         if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
4637                                         intel_dp->lane_count))
4638                 return false;
4639
4640         /* Retrain if Channel EQ or CR not ok */
4641         return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
4642 }
4643
4644 static bool intel_dp_has_connector(struct intel_dp *intel_dp,
4645                                    const struct drm_connector_state *conn_state)
4646 {
4647         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4648         struct intel_encoder *encoder;
4649         enum pipe pipe;
4650
4651         if (!conn_state->best_encoder)
4652                 return false;
4653
4654         /* SST */
4655         encoder = &dp_to_dig_port(intel_dp)->base;
4656         if (conn_state->best_encoder == &encoder->base)
4657                 return true;
4658
4659         /* MST */
4660         for_each_pipe(i915, pipe) {
4661                 encoder = &intel_dp->mst_encoders[pipe]->base;
4662                 if (conn_state->best_encoder == &encoder->base)
4663                         return true;
4664         }
4665
4666         return false;
4667 }
4668
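/*
 * Build the mask of active CRTCs currently driven by this DP encoder (SST
 * or MST) and grab their modeset locks, skipping CRTCs with a commit still
 * in flight. The mask is left empty if no link retraining is needed.
 */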
4669 static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
4670                                       struct drm_modeset_acquire_ctx *ctx,
4671                                       u32 *crtc_mask)
4672 {
4673         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4674         struct drm_connector_list_iter conn_iter;
4675         struct intel_connector *connector;
4676         int ret = 0;
4677
4678         *crtc_mask = 0;
4679
4680         if (!intel_dp_needs_link_retrain(intel_dp))
4681                 return 0;
4682
4683         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
4684         for_each_intel_connector_iter(connector, &conn_iter) {
4685                 struct drm_connector_state *conn_state =
4686                         connector->base.state;
4687                 struct intel_crtc_state *crtc_state;
4688                 struct intel_crtc *crtc;
4689
4690                 if (!intel_dp_has_connector(intel_dp, conn_state))
4691                         continue;
4692
4693                 crtc = to_intel_crtc(conn_state->crtc);
4694                 if (!crtc)
4695                         continue;
4696
4697                 ret = drm_modeset_lock(&crtc->base.mutex, ctx);
4698                 if (ret)
4699                         break;
4700
4701                 crtc_state = to_intel_crtc_state(crtc->base.state);
4702
4703                 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));
4704
4705                 if (!crtc_state->hw.active)
4706                         continue;
4707
4708                 if (conn_state->commit &&
4709                     !try_wait_for_completion(&conn_state->commit->hw_done))
4710                         continue;
4711
4712                 *crtc_mask |= drm_crtc_mask(&crtc->base);
4713         }
4714         drm_connector_list_iter_end(&conn_iter);
4715
4716         if (!intel_dp_needs_link_retrain(intel_dp))
4717                 *crtc_mask = 0;
4718
4719         return ret;
4720 }
4721
4722 static bool intel_dp_is_connected(struct intel_dp *intel_dp)
4723 {
4724         struct intel_connector *connector = intel_dp->attached_connector;
4725
4726         return connector->base.status == connector_status_connected ||
4727                 intel_dp->is_mst;
4728 }
4729
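/*
 * Retrain the DP link behind this encoder. FIFO underrun reporting is
 * suppressed on the affected pipes while retraining, and on gen12+ MST
 * only the master transcoder's state is used for the retrain.
 */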
4730 int intel_dp_retrain_link(struct intel_encoder *encoder,
4731                           struct drm_modeset_acquire_ctx *ctx)
4732 {
4733         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4734         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4735         struct intel_crtc *crtc;
4736         u32 crtc_mask;
4737         int ret;
4738
4739         if (!intel_dp_is_connected(intel_dp))
4740                 return 0;
4741
4742         ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
4743                                ctx);
4744         if (ret)
4745                 return ret;
4746
4747         ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask);
4748         if (ret)
4749                 return ret;
4750
4751         if (crtc_mask == 0)
4752                 return 0;
4753
4754         drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
4755                     encoder->base.base.id, encoder->base.name);
4756
4757         for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
4758                 const struct intel_crtc_state *crtc_state =
4759                         to_intel_crtc_state(crtc->base.state);
4760
4761                 /* Suppress underruns caused by re-training */
4762                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
4763                 if (crtc_state->has_pch_encoder)
4764                         intel_set_pch_fifo_underrun_reporting(dev_priv,
4765                                                               intel_crtc_pch_transcoder(crtc), false);
4766         }
4767
4768         for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
4769                 const struct intel_crtc_state *crtc_state =
4770                         to_intel_crtc_state(crtc->base.state);
4771
4772                 /* retrain on the MST master transcoder */
4773                 if (INTEL_GEN(dev_priv) >= 12 &&
4774                     intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
4775                     !intel_dp_mst_is_master_trans(crtc_state))
4776                         continue;
4777
4778                 intel_dp_check_frl_training(intel_dp);
4779                 intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
4780                 intel_dp_start_link_train(intel_dp, crtc_state);
4781                 intel_dp_stop_link_train(intel_dp, crtc_state);
4782                 break;
4783         }
4784
4785         for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
4786                 const struct intel_crtc_state *crtc_state =
4787                         to_intel_crtc_state(crtc->base.state);
4788
4789                 /* Keep underrun reporting disabled until things are stable */
4790                 intel_wait_for_vblank(dev_priv, crtc->pipe);
4791
4792                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
4793                 if (crtc_state->has_pch_encoder)
4794                         intel_set_pch_fifo_underrun_reporting(dev_priv,
4795                                                               intel_crtc_pch_transcoder(crtc), true);
4796         }
4797
4798         return 0;
4799 }
4800
4801 static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
4802                                   struct drm_modeset_acquire_ctx *ctx,
4803                                   u32 *crtc_mask)
4804 {
4805         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4806         struct drm_connector_list_iter conn_iter;
4807         struct intel_connector *connector;
4808         int ret = 0;
4809
4810         *crtc_mask = 0;
4811
4812         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
4813         for_each_intel_connector_iter(connector, &conn_iter) {
4814                 struct drm_connector_state *conn_state =
4815                         connector->base.state;
4816                 struct intel_crtc_state *crtc_state;
4817                 struct intel_crtc *crtc;
4818
4819                 if (!intel_dp_has_connector(intel_dp, conn_state))
4820                         continue;
4821
4822                 crtc = to_intel_crtc(conn_state->crtc);
4823                 if (!crtc)
4824                         continue;
4825
4826                 ret = drm_modeset_lock(&crtc->base.mutex, ctx);
4827                 if (ret)
4828                         break;
4829
4830                 crtc_state = to_intel_crtc_state(crtc->base.state);
4831
4832                 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));
4833
4834                 if (!crtc_state->hw.active)
4835                         continue;
4836
4837                 if (conn_state->commit &&
4838                     !try_wait_for_completion(&conn_state->commit->hw_done))
4839                         continue;
4840
4841                 *crtc_mask |= drm_crtc_mask(&crtc->base);
4842         }
4843         drm_connector_list_iter_end(&conn_iter);
4844
4845         return ret;
4846 }
4847
4848 static int intel_dp_do_phy_test(struct intel_encoder *encoder,
4849                                 struct drm_modeset_acquire_ctx *ctx)
4850 {
4851         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4852         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4853         struct intel_crtc *crtc;
4854         u32 crtc_mask;
4855         int ret;
4856
4857         ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
4858                                ctx);
4859         if (ret)
4860                 return ret;
4861
4862         ret = intel_dp_prep_phy_test(intel_dp, ctx, &crtc_mask);
4863         if (ret)
4864                 return ret;
4865
4866         if (crtc_mask == 0)
4867                 return 0;
4868
4869         drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n",
4870                     encoder->base.base.id, encoder->base.name);
4871
4872         for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
4873                 const struct intel_crtc_state *crtc_state =
4874                         to_intel_crtc_state(crtc->base.state);
4875
4876                 /* test on the MST master transcoder */
4877                 if (INTEL_GEN(dev_priv) >= 12 &&
4878                     intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
4879                     !intel_dp_mst_is_master_trans(crtc_state))
4880                         continue;
4881
4882                 intel_dp_process_phy_request(intel_dp, crtc_state);
4883                 break;
4884         }
4885
4886         return 0;
4887 }
4888
4889 void intel_dp_phy_test(struct intel_encoder *encoder)
4890 {
4891         struct drm_modeset_acquire_ctx ctx;
4892         int ret;
4893
4894         drm_modeset_acquire_init(&ctx, 0);
4895
4896         for (;;) {
4897                 ret = intel_dp_do_phy_test(encoder, &ctx);
4898
4899                 if (ret == -EDEADLK) {
4900                         drm_modeset_backoff(&ctx);
4901                         continue;
4902                 }
4903
4904                 break;
4905         }
4906
4907         drm_modeset_drop_locks(&ctx);
4908         drm_modeset_acquire_fini(&ctx);
4909         drm_WARN(encoder->base.dev, ret,
4910                  "Acquiring modeset locks failed with %i\n", ret);
4911 }
4912
4913 /*
4914  * If the display is now connected, check the link status; there have
4915  * been known issues of link loss triggering a long pulse.
4916  *
4917  * Some sinks (e.g. ASUS PB287Q) seem to perform a weird HPD ping-pong
4918  * during modesets, so we can apparently end up with HPD going low
4919  * during a modeset and then coming back up soon after. Once that
4920  * happens we must retrain the link to get a picture, in case no
4921  * userspace component reacted to the intermittent HPD dip.
4924  */
4925 static enum intel_hotplug_state
4926 intel_dp_hotplug(struct intel_encoder *encoder,
4927                  struct intel_connector *connector)
4928 {
4929         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4930         struct drm_modeset_acquire_ctx ctx;
4931         enum intel_hotplug_state state;
4932         int ret;
4933
4934         if (intel_dp->compliance.test_active &&
4935             intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) {
4936                 intel_dp_phy_test(encoder);
4937                 /* just do the PHY test and nothing else */
4938                 return INTEL_HOTPLUG_UNCHANGED;
4939         }
4940
4941         state = intel_encoder_hotplug(encoder, connector);
4942
4943         drm_modeset_acquire_init(&ctx, 0);
4944
4945         for (;;) {
4946                 ret = intel_dp_retrain_link(encoder, &ctx);
4947
4948                 if (ret == -EDEADLK) {
4949                         drm_modeset_backoff(&ctx);
4950                         continue;
4951                 }
4952
4953                 break;
4954         }
4955
4956         drm_modeset_drop_locks(&ctx);
4957         drm_modeset_acquire_fini(&ctx);
4958         drm_WARN(encoder->base.dev, ret,
4959                  "Acquiring modeset locks failed with %i\n", ret);
4960
4961         /*
4962          * Keeping it consistent with intel_ddi_hotplug() and
4963          * intel_hdmi_hotplug().
4964          */
4965         if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
4966                 state = INTEL_HOTPLUG_RETRY;
4967
4968         return state;
4969 }
4970
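/*
 * Read and ack the DPCD device service IRQ vector, then dispatch automated
 * test requests, HDCP CP_IRQ and sink specific IRQs.
 */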
4971 static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
4972 {
4973         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4974         u8 val;
4975
4976         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
4977                 return;
4978
4979         if (drm_dp_dpcd_readb(&intel_dp->aux,
4980                               DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
4981                 return;
4982
4983         drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
4984
4985         if (val & DP_AUTOMATED_TEST_REQUEST)
4986                 intel_dp_handle_test_request(intel_dp);
4987
4988         if (val & DP_CP_IRQ)
4989                 intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
4990
4991         if (val & DP_SINK_SPECIFIC_IRQ)
4992                 drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
4993 }
4994
4995 static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
4996 {
4997         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4998         u8 val;
4999
5000         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
5001                 return;
5002
5003         if (drm_dp_dpcd_readb(&intel_dp->aux,
5004                               DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) {
5005                 drm_dbg_kms(&i915->drm, "Error in reading link service irq vector\n");
5006                 return;
5007         }
5008
5009         if (drm_dp_dpcd_writeb(&intel_dp->aux,
5010                                DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) {
5011                 drm_dbg_kms(&i915->drm, "Error in writing link service irq vector\n");
5012                 return;
5013         }
5014
5015         if (val & HDMI_LINK_STATUS_CHANGED)
5016                 intel_dp_handle_hdmi_link_status_change(intel_dp);
5017 }
5018
5019 /*
5020  * According to DP spec
5021  * 5.1.2:
5022  *  1. Read DPCD
5023  *  2. Configure link according to Receiver Capabilities
5024  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
5025  *  4. Check link status on receipt of hot-plug interrupt
5026  *
5027  * intel_dp_short_pulse - handles short pulse interrupts
5028  * when full detection is not required.
5029  * Returns %true if the short pulse was handled and full detection
5030  * is NOT required, %false otherwise.
5031  */
5032 static bool
5033 intel_dp_short_pulse(struct intel_dp *intel_dp)
5034 {
5035         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
5036         u8 old_sink_count = intel_dp->sink_count;
5037         bool ret;
5038
5039         /*
5040          * Clear the compliance test variables so that fresh values can be
5041          * captured for the next automated test request.
5042          */
5043         memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
5044
5045         /*
5046          * Now read the DPCD to see if the sink is actually running.
5047          * If the current sink count doesn't match the value stored
5048          * earlier, or the DPCD read failed, we need to do full
5049          * detection.
5050          */
5051         ret = intel_dp_get_dpcd(intel_dp);
5052
5053         if ((old_sink_count != intel_dp->sink_count) || !ret) {
5054                 /* No need to proceed if we are going to do full detect */
5055                 return false;
5056         }
5057
5058         intel_dp_check_device_service_irq(intel_dp);
5059         intel_dp_check_link_service_irq(intel_dp);
5060
5061         /* Handle CEC interrupts, if any */
5062         drm_dp_cec_irq(&intel_dp->aux);
5063
5064         /* defer to the hotplug work for link retraining if needed */
5065         if (intel_dp_needs_link_retrain(intel_dp))
5066                 return false;
5067
5068         intel_psr_short_pulse(intel_dp);
5069
5070         switch (intel_dp->compliance.test_type) {
5071         case DP_TEST_LINK_TRAINING:
5072                 drm_dbg_kms(&dev_priv->drm,
5073                             "Link Training Compliance Test requested\n");
5074                 /* Send a Hotplug Uevent to userspace to start modeset */
5075                 drm_kms_helper_hotplug_event(&dev_priv->drm);
5076                 break;
5077         case DP_TEST_LINK_PHY_TEST_PATTERN:
5078                 drm_dbg_kms(&dev_priv->drm,
5079                             "PHY test pattern Compliance Test requested\n");
5080                 /*
5081                  * Schedule long hpd to do the test
5082                  *
5083                  * FIXME get rid of the ad-hoc phy test modeset code
5084                  * and properly incorporate it into the normal modeset.
5085                  */
5086                 return false;
5087         }
5088
5089         return true;
5090 }
5091
5092 /* XXX this is probably wrong for multiple downstream ports */
5093 static enum drm_connector_status
5094 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
5095 {
5096         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5097         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5098         u8 *dpcd = intel_dp->dpcd;
5099         u8 type;
5100
5101         if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
5102                 return connector_status_connected;
5103
5104         lspcon_resume(dig_port);
5105
5106         if (!intel_dp_get_dpcd(intel_dp))
5107                 return connector_status_disconnected;
5108
5109         /* if there's no downstream port, we're done */
5110         if (!drm_dp_is_branch(dpcd))
5111                 return connector_status_connected;
5112
5113         /* If we're HPD-aware, SINK_COUNT changes dynamically */
5114         if (intel_dp_has_sink_count(intel_dp) &&
5115             intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
5116                 return intel_dp->sink_count ?
5117                 connector_status_connected : connector_status_disconnected;
5118         }
5119
5120         if (intel_dp_can_mst(intel_dp))
5121                 return connector_status_connected;
5122
5123         /* If no HPD, poke DDC gently */
5124         if (drm_probe_ddc(&intel_dp->aux.ddc))
5125                 return connector_status_connected;
5126
5127         /* Well we tried, say unknown for unreliable port types */
5128         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
5129                 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
5130                 if (type == DP_DS_PORT_TYPE_VGA ||
5131                     type == DP_DS_PORT_TYPE_NON_EDID)
5132                         return connector_status_unknown;
5133         } else {
5134                 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
5135                         DP_DWN_STRM_PORT_TYPE_MASK;
5136                 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
5137                     type == DP_DWN_STRM_PORT_TYPE_OTHER)
5138                         return connector_status_unknown;
5139         }
5140
5141         /* Anything else is out of spec, warn and ignore */
5142         drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
5143         return connector_status_disconnected;
5144 }
5145
5146 static enum drm_connector_status
5147 edp_detect(struct intel_dp *intel_dp)
5148 {
5149         return connector_status_connected;
5150 }
5151
5152 static bool ibx_digital_port_connected(struct intel_encoder *encoder)
5153 {
5154         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5155         u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];
5156
5157         return intel_de_read(dev_priv, SDEISR) & bit;
5158 }
5159
5160 static bool g4x_digital_port_connected(struct intel_encoder *encoder)
5161 {
5162         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5163         u32 bit;
5164
5165         switch (encoder->hpd_pin) {
5166         case HPD_PORT_B:
5167                 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
5168                 break;
5169         case HPD_PORT_C:
5170                 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
5171                 break;
5172         case HPD_PORT_D:
5173                 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
5174                 break;
5175         default:
5176                 MISSING_CASE(encoder->hpd_pin);
5177                 return false;
5178         }
5179
5180         return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
5181 }
5182
5183 static bool gm45_digital_port_connected(struct intel_encoder *encoder)
5184 {
5185         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5186         u32 bit;
5187
5188         switch (encoder->hpd_pin) {
5189         case HPD_PORT_B:
5190                 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
5191                 break;
5192         case HPD_PORT_C:
5193                 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
5194                 break;
5195         case HPD_PORT_D:
5196                 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
5197                 break;
5198         default:
5199                 MISSING_CASE(encoder->hpd_pin);
5200                 return false;
5201         }
5202
5203         return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
5204 }
5205
5206 static bool ilk_digital_port_connected(struct intel_encoder *encoder)
5207 {
5208         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5209         u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
5210
5211         return intel_de_read(dev_priv, DEISR) & bit;
5212 }
5213
5214 /*
5215  * intel_digital_port_connected - is the specified port connected?
5216  * @encoder: intel_encoder
5217  *
5218  * In cases where there's a connector physically connected but it can't be used
5219  * by our hardware we also return false, since the rest of the driver should
5220  * pretty much treat the port as disconnected. This is relevant for type-C
5221  * (starting on ICL) where there's ownership involved.
5222  *
5223  * Return %true if port is connected, %false otherwise.
5224  */
5225 bool intel_digital_port_connected(struct intel_encoder *encoder)
5226 {
5227         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5228         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
5229         bool is_connected = false;
5230         intel_wakeref_t wakeref;
5231
5232         with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
5233                 is_connected = dig_port->connected(encoder);
5234
5235         return is_connected;
5236 }
5237
5238 static struct edid *
5239 intel_dp_get_edid(struct intel_dp *intel_dp)
5240 {
5241         struct intel_connector *intel_connector = intel_dp->attached_connector;
5242
5243         /* use cached edid if we have one */
5244         if (intel_connector->edid) {
5245                 /* invalid edid */
5246                 if (IS_ERR(intel_connector->edid))
5247                         return NULL;
5248
5249                 return drm_edid_duplicate(intel_connector->edid);
5250         } else
5251                 return drm_get_edid(&intel_connector->base,
5252                                     &intel_dp->aux.ddc);
5253 }
5254
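/*
 * Cache the downstream facing port (DFP) capabilities used for mode
 * validation: max bpc, max dotclock, TMDS clock range and PCON max FRL
 * bandwidth, all derived from the branch device DPCD and the sink EDID.
 */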
5255 static void
5256 intel_dp_update_dfp(struct intel_dp *intel_dp,
5257                     const struct edid *edid)
5258 {
5259         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5260         struct intel_connector *connector = intel_dp->attached_connector;
5261
5262         intel_dp->dfp.max_bpc =
5263                 drm_dp_downstream_max_bpc(intel_dp->dpcd,
5264                                           intel_dp->downstream_ports, edid);
5265
5266         intel_dp->dfp.max_dotclock =
5267                 drm_dp_downstream_max_dotclock(intel_dp->dpcd,
5268                                                intel_dp->downstream_ports);
5269
5270         intel_dp->dfp.min_tmds_clock =
5271                 drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
5272                                                  intel_dp->downstream_ports,
5273                                                  edid);
5274         intel_dp->dfp.max_tmds_clock =
5275                 drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
5276                                                  intel_dp->downstream_ports,
5277                                                  edid);
5278
5279         intel_dp->dfp.pcon_max_frl_bw =
5280                 drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd,
5281                                            intel_dp->downstream_ports);
5282
5283         drm_dbg_kms(&i915->drm,
5284                     "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n",
5285                     connector->base.base.id, connector->base.name,
5286                     intel_dp->dfp.max_bpc,
5287                     intel_dp->dfp.max_dotclock,
5288                     intel_dp->dfp.min_tmds_clock,
5289                     intel_dp->dfp.max_tmds_clock,
5290                     intel_dp->dfp.pcon_max_frl_bw);
5291
5292         intel_dp_get_pcon_dsc_cap(intel_dp);
5293 }
5294
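/*
 * Work out how YCbCr 4:2:0 output can be achieved through this port:
 * native 4:2:0 passthrough, 4:4:4->4:2:0 conversion in the branch
 * device/LSPCON, or RGB->YCbCr conversion in the PCON on gen11+.
 */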
5295 static void
5296 intel_dp_update_420(struct intel_dp *intel_dp)
5297 {
5298         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5299         struct intel_connector *connector = intel_dp->attached_connector;
5300         bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr;
5301
5302         /* No YCbCr output support on gmch platforms */
5303         if (HAS_GMCH(i915))
5304                 return;
5305
5306         /*
5307          * ILK doesn't seem capable of DP YCbCr output. The
5308          * displayed image is severely corrupted. SNB+ is fine.
5309          */
5310         if (IS_GEN(i915, 5))
5311                 return;
5312
5313         is_branch = drm_dp_is_branch(intel_dp->dpcd);
5314         ycbcr_420_passthrough =
5315                 drm_dp_downstream_420_passthrough(intel_dp->dpcd,
5316                                                   intel_dp->downstream_ports);
5317         /* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */
5318         ycbcr_444_to_420 =
5319                 dp_to_dig_port(intel_dp)->lspcon.active ||
5320                 drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
5321                                                         intel_dp->downstream_ports);
5322         rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
5323                                                                  intel_dp->downstream_ports,
5324                                                                  DP_DS_HDMI_BT601_RGB_YCBCR_CONV |
5325                                                                  DP_DS_HDMI_BT709_RGB_YCBCR_CONV |
5326                                                                  DP_DS_HDMI_BT2020_RGB_YCBCR_CONV);
5327
5328         if (INTEL_GEN(i915) >= 11) {
5329                 /* Let PCON convert from RGB->YCbCr if possible */
5330                 if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) {
5331                         intel_dp->dfp.rgb_to_ycbcr = true;
5332                         intel_dp->dfp.ycbcr_444_to_420 = true;
5333                         connector->base.ycbcr_420_allowed = true;
5334                 } else {
5335                         /* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
5336                         intel_dp->dfp.ycbcr_444_to_420 =
5337                                 ycbcr_444_to_420 && !ycbcr_420_passthrough;
5338
5339                         connector->base.ycbcr_420_allowed =
5340                                 !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
5341                 }
5342         } else {
5343                 /* 4:4:4->4:2:0 conversion is the only way */
5344                 intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;
5345
5346                 connector->base.ycbcr_420_allowed = ycbcr_444_to_420;
5347         }
5348
5349         drm_dbg_kms(&i915->drm,
5350                     "[CONNECTOR:%d:%s] RGB->YCbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
5351                     connector->base.base.id, connector->base.name,
5352                     yesno(intel_dp->dfp.rgb_to_ycbcr),
5353                     yesno(connector->base.ycbcr_420_allowed),
5354                     yesno(intel_dp->dfp.ycbcr_444_to_420));
5355 }
5356
5357 static void
5358 intel_dp_set_edid(struct intel_dp *intel_dp)
5359 {
5360         struct intel_connector *connector = intel_dp->attached_connector;
5361         struct edid *edid;
5362
5363         intel_dp_unset_edid(intel_dp);
5364         edid = intel_dp_get_edid(intel_dp);
5365         connector->detect_edid = edid;
5366
5367         intel_dp_update_dfp(intel_dp, edid);
5368         intel_dp_update_420(intel_dp);
5369
5370         if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
5371                 intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
5372                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
5373         }
5374
5375         drm_dp_cec_set_edid(&intel_dp->aux, edid);
5376 }
5377
5378 static void
5379 intel_dp_unset_edid(struct intel_dp *intel_dp)
5380 {
5381         struct intel_connector *connector = intel_dp->attached_connector;
5382
5383         drm_dp_cec_unset_edid(&intel_dp->aux);
5384         kfree(connector->detect_edid);
5385         connector->detect_edid = NULL;
5386
5387         intel_dp->has_hdmi_sink = false;
5388         intel_dp->has_audio = false;
5389
5390         intel_dp->dfp.max_bpc = 0;
5391         intel_dp->dfp.max_dotclock = 0;
5392         intel_dp->dfp.min_tmds_clock = 0;
5393         intel_dp->dfp.max_tmds_clock = 0;
5394
5395         intel_dp->dfp.pcon_max_frl_bw = 0;
5396
5397         intel_dp->dfp.ycbcr_444_to_420 = false;
5398         connector->base.ycbcr_420_allowed = false;
5399 }
5400
5401 static int
5402 intel_dp_detect(struct drm_connector *connector,
5403                 struct drm_modeset_acquire_ctx *ctx,
5404                 bool force)
5405 {
5406         struct drm_i915_private *dev_priv = to_i915(connector->dev);
5407         struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
5408         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5409         struct intel_encoder *encoder = &dig_port->base;
5410         enum drm_connector_status status;
5411
5412         drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
5413                     connector->base.id, connector->name);
5414         drm_WARN_ON(&dev_priv->drm,
5415                     !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
5416
5417         if (!INTEL_DISPLAY_ENABLED(dev_priv))
5418                 return connector_status_disconnected;
5419
5420         /* Can't disconnect eDP */
5421         if (intel_dp_is_edp(intel_dp))
5422                 status = edp_detect(intel_dp);
5423         else if (intel_digital_port_connected(encoder))
5424                 status = intel_dp_detect_dpcd(intel_dp);
5425         else
5426                 status = connector_status_disconnected;
5427
5428         if (status == connector_status_disconnected) {
5429                 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
5430                 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
5431
5432                 if (intel_dp->is_mst) {
5433                         drm_dbg_kms(&dev_priv->drm,
5434                                     "MST device may have disappeared %d vs %d\n",
5435                                     intel_dp->is_mst,
5436                                     intel_dp->mst_mgr.mst_state);
5437                         intel_dp->is_mst = false;
5438                         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5439                                                         intel_dp->is_mst);
5440                 }
5441
5442                 goto out;
5443         }
5444
5445         /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
5446         if (INTEL_GEN(dev_priv) >= 11)
5447                 intel_dp_get_dsc_sink_cap(intel_dp);
5448
5449         intel_dp_configure_mst(intel_dp);
5450
5451         /*
5452          * TODO: Reset link params when switching to MST mode, until MST
5453          * supports link training fallback params.
5454          */
5455         if (intel_dp->reset_link_params || intel_dp->is_mst) {
5456                 /* Initial max link lane count */
5457                 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
5458
5459                 /* Initial max link rate */
5460                 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
5461
5462                 intel_dp->reset_link_params = false;
5463         }
5464
5465         intel_dp_print_rates(intel_dp);
5466
5467         if (intel_dp->is_mst) {
5468                 /*
5469                  * If we are in MST mode then this connector
5470                  * won't appear connected or have anything
5471                  * with EDID on it
5472                  */
5473                 status = connector_status_disconnected;
5474                 goto out;
5475         }
5476
5477         /*
5478          * Some external monitors do not signal loss of link synchronization
5479          * with an IRQ_HPD, so force a link status check.
5480          */
5481         if (!intel_dp_is_edp(intel_dp)) {
5482                 int ret;
5483
5484                 ret = intel_dp_retrain_link(encoder, ctx);
5485                 if (ret)
5486                         return ret;
5487         }
5488
5489         /*
5490          * Clear the NACK and defer counts to get their exact values
5491          * while reading the EDID, as required by Compliance tests
5492          * 4.2.2.4 and 4.2.2.5.
5493          */
5494         intel_dp->aux.i2c_nack_count = 0;
5495         intel_dp->aux.i2c_defer_count = 0;
5496
5497         intel_dp_set_edid(intel_dp);
5498         if (intel_dp_is_edp(intel_dp) ||
5499             to_intel_connector(connector)->detect_edid)
5500                 status = connector_status_connected;
5501
5502         intel_dp_check_device_service_irq(intel_dp);
5503
5504 out:
5505         if (status != connector_status_connected && !intel_dp->is_mst)
5506                 intel_dp_unset_edid(intel_dp);
5507
5508         /*
5509          * Make sure the refs for power wells enabled during detect are
5510          * dropped to avoid a new detect cycle triggered by HPD polling.
5511          */
5512         intel_display_power_flush_work(dev_priv);
5513
5514         if (!intel_dp_is_edp(intel_dp))
5515                 drm_dp_set_subconnector_property(connector,
5516                                                  status,
5517                                                  intel_dp->dpcd,
5518                                                  intel_dp->downstream_ports);
5519         return status;
5520 }
5521
5522 static void
5523 intel_dp_force(struct drm_connector *connector)
5524 {
5525         struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
5526         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5527         struct intel_encoder *intel_encoder = &dig_port->base;
5528         struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
5529         enum intel_display_power_domain aux_domain =
5530                 intel_aux_power_domain(dig_port);
5531         intel_wakeref_t wakeref;
5532
5533         drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
5534                     connector->base.id, connector->name);
5535         intel_dp_unset_edid(intel_dp);
5536
5537         if (connector->status != connector_status_connected)
5538                 return;
5539
5540         wakeref = intel_display_power_get(dev_priv, aux_domain);
5541
5542         intel_dp_set_edid(intel_dp);
5543
5544         intel_display_power_put(dev_priv, aux_domain, wakeref);
5545 }
5546
5547 static int intel_dp_get_modes(struct drm_connector *connector)
5548 {
5549         struct intel_connector *intel_connector = to_intel_connector(connector);
5550         struct edid *edid;
5551
5552         edid = intel_connector->detect_edid;
5553         if (edid) {
5554                 int ret = intel_connector_update_modes(connector, edid);
5555
5556                 if (intel_vrr_is_capable(connector))
5557                         drm_connector_set_vrr_capable_property(connector,
5558                                                                true);
5559                 if (ret)
5560                         return ret;
5561         }
5562
5563         /* if eDP has no EDID, fall back to fixed mode */
5564         if (intel_dp_is_edp(intel_attached_dp(intel_connector)) &&
5565             intel_connector->panel.fixed_mode) {
5566                 struct drm_display_mode *mode;
5567
5568                 mode = drm_mode_duplicate(connector->dev,
5569                                           intel_connector->panel.fixed_mode);
5570                 if (mode) {
5571                         drm_mode_probed_add(connector, mode);
5572                         return 1;
5573                 }
5574         }
5575
5576         if (!edid) {
5577                 struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
5578                 struct drm_display_mode *mode;
5579
5580                 mode = drm_dp_downstream_mode(connector->dev,
5581                                               intel_dp->dpcd,
5582                                               intel_dp->downstream_ports);
5583                 if (mode) {
5584                         drm_mode_probed_add(connector, mode);
5585                         return 1;
5586                 }
5587         }
5588
5589         return 0;
5590 }
5591
5592 static int
5593 intel_dp_connector_register(struct drm_connector *connector)
5594 {
5595         struct drm_i915_private *i915 = to_i915(connector->dev);
5596         struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
5597         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5598         struct intel_lspcon *lspcon = &dig_port->lspcon;
5599         int ret;
5600
5601         ret = intel_connector_register(connector);
5602         if (ret)
5603                 return ret;
5604
5605         drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
5606                     intel_dp->aux.name, connector->kdev->kobj.name);
5607
5608         intel_dp->aux.dev = connector->kdev;
5609         ret = drm_dp_aux_register(&intel_dp->aux);
5610         if (!ret)
5611                 drm_dp_cec_register_connector(&intel_dp->aux, connector);
5612
5613         if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
5614                 return ret;
5615
5616         /*
5617          * ToDo: Clean this up to handle lspcon init and resume in a more
5618          * efficient and streamlined way.
5619          */
5620         if (lspcon_init(dig_port)) {
5621                 lspcon_detect_hdr_capability(lspcon);
5622                 if (lspcon->hdr_supported)
5623                         drm_object_attach_property(&connector->base,
5624                                                    connector->dev->mode_config.hdr_output_metadata_property,
5625                                                    0);
5626         }
5627
5628         return ret;
5629 }
5630
5631 static void
5632 intel_dp_connector_unregister(struct drm_connector *connector)
5633 {
5634         struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
5635
5636         drm_dp_cec_unregister_connector(&intel_dp->aux);
5637         drm_dp_aux_unregister(&intel_dp->aux);
5638         intel_connector_unregister(connector);
5639 }
5640
5641 void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
5642 {
5643         struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
5644         struct intel_dp *intel_dp = &dig_port->dp;
5645
5646         intel_dp_mst_encoder_cleanup(dig_port);
5647
5648         intel_pps_vdd_off_sync(intel_dp);
5649
5650         intel_dp_aux_fini(intel_dp);
5651 }
5652
5653 static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5654 {
5655         intel_dp_encoder_flush_work(encoder);
5656
5657         drm_encoder_cleanup(encoder);
5658         kfree(enc_to_dig_port(to_intel_encoder(encoder)));
5659 }
5660
5661 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5662 {
5663         struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
5664
5665         intel_pps_vdd_off_sync(intel_dp);
5666 }
5667
5668 void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
5669 {
5670         struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
5671
5672         intel_pps_wait_power_cycle(intel_dp);
5673 }
5674
5675 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
5676 {
5677         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
5678         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
5679         enum pipe pipe;
5680
5681         if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
5682                                   encoder->port, &pipe))
5683                 return pipe;
5684
5685         return INVALID_PIPE;
5686 }
5687
5688 void intel_dp_encoder_reset(struct drm_encoder *encoder)
5689 {
5690         struct drm_i915_private *dev_priv = to_i915(encoder->dev);
5691         struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
5692
5693         if (!HAS_DDI(dev_priv))
5694                 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
5695
5696         intel_dp->reset_link_params = true;
5697
5698         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
5699                 intel_wakeref_t wakeref;
5700
5701                 with_intel_pps_lock(intel_dp, wakeref)
5702                         intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
5703         }
5704
5705         intel_pps_encoder_reset(intel_dp);
5706 }
5707
5708 static int intel_modeset_tile_group(struct intel_atomic_state *state,
5709                                     int tile_group_id)
5710 {
5711         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5712         struct drm_connector_list_iter conn_iter;
5713         struct drm_connector *connector;
5714         int ret = 0;
5715
5716         drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
5717         drm_for_each_connector_iter(connector, &conn_iter) {
5718                 struct drm_connector_state *conn_state;
5719                 struct intel_crtc_state *crtc_state;
5720                 struct intel_crtc *crtc;
5721
5722                 if (!connector->has_tile ||
5723                     connector->tile_group->id != tile_group_id)
5724                         continue;
5725
5726                 conn_state = drm_atomic_get_connector_state(&state->base,
5727                                                             connector);
5728                 if (IS_ERR(conn_state)) {
5729                         ret = PTR_ERR(conn_state);
5730                         break;
5731                 }
5732
5733                 crtc = to_intel_crtc(conn_state->crtc);
5734
5735                 if (!crtc)
5736                         continue;
5737
5738                 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
5739                 crtc_state->uapi.mode_changed = true;
5740
5741                 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
5742                 if (ret)
5743                         break;
5744         }
5745         drm_connector_list_iter_end(&conn_iter);
5746
5747         return ret;
5748 }
5749
5750 static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
5751 {
5752         struct drm_i915_private *dev_priv = to_i915(state->base.dev);
5753         struct intel_crtc *crtc;
5754
5755         if (transcoders == 0)
5756                 return 0;
5757
5758         for_each_intel_crtc(&dev_priv->drm, crtc) {
5759                 struct intel_crtc_state *crtc_state;
5760                 int ret;
5761
5762                 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
5763                 if (IS_ERR(crtc_state))
5764                         return PTR_ERR(crtc_state);
5765
5766                 if (!crtc_state->hw.enable)
5767                         continue;
5768
5769                 if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
5770                         continue;
5771
5772                 crtc_state->uapi.mode_changed = true;
5773
5774                 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
5775                 if (ret)
5776                         return ret;
5777
5778                 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
5779                 if (ret)
5780                         return ret;
5781
5782                 transcoders &= ~BIT(crtc_state->cpu_transcoder);
5783         }
5784
5785         drm_WARN_ON(&dev_priv->drm, transcoders != 0);
5786
5787         return 0;
5788 }
5789
5790 static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
5791                                       struct drm_connector *connector)
5792 {
5793         const struct drm_connector_state *old_conn_state =
5794                 drm_atomic_get_old_connector_state(&state->base, connector);
5795         const struct intel_crtc_state *old_crtc_state;
5796         struct intel_crtc *crtc;
5797         u8 transcoders;
5798
5799         crtc = to_intel_crtc(old_conn_state->crtc);
5800         if (!crtc)
5801                 return 0;
5802
5803         old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
5804
5805         if (!old_crtc_state->hw.active)
5806                 return 0;
5807
5808         transcoders = old_crtc_state->sync_mode_slaves_mask;
5809         if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
5810                 transcoders |= BIT(old_crtc_state->master_transcoder);
5811
5812         return intel_modeset_affected_transcoders(state,
5813                                                   transcoders);
5814 }
5815
5816 static int intel_dp_connector_atomic_check(struct drm_connector *conn,
5817                                            struct drm_atomic_state *_state)
5818 {
5819         struct drm_i915_private *dev_priv = to_i915(conn->dev);
5820         struct intel_atomic_state *state = to_intel_atomic_state(_state);
5821         int ret;
5822
5823         ret = intel_digital_connector_atomic_check(conn, &state->base);
5824         if (ret)
5825                 return ret;
5826
5827         /*
5828          * We don't enable port sync on BDW due to missing w/as and
5829          * due to not having adjusted the modeset sequence appropriately.
5830          */
5831         if (INTEL_GEN(dev_priv) < 9)
5832                 return 0;
5833
5834         if (!intel_connector_needs_modeset(state, conn))
5835                 return 0;
5836
5837         if (conn->has_tile) {
5838                 ret = intel_modeset_tile_group(state, conn->tile_group->id);
5839                 if (ret)
5840                         return ret;
5841         }
5842
5843         return intel_modeset_synced_crtcs(state, conn);
5844 }
5845
5846 static const struct drm_connector_funcs intel_dp_connector_funcs = {
5847         .force = intel_dp_force,
5848         .fill_modes = drm_helper_probe_single_connector_modes,
5849         .atomic_get_property = intel_digital_connector_atomic_get_property,
5850         .atomic_set_property = intel_digital_connector_atomic_set_property,
5851         .late_register = intel_dp_connector_register,
5852         .early_unregister = intel_dp_connector_unregister,
5853         .destroy = intel_connector_destroy,
5854         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5855         .atomic_duplicate_state = intel_digital_connector_duplicate_state,
5856 };
5857
5858 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5859         .detect_ctx = intel_dp_detect,
5860         .get_modes = intel_dp_get_modes,
5861         .mode_valid = intel_dp_mode_valid,
5862         .atomic_check = intel_dp_connector_atomic_check,
5863 };
5864
5865 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
5866         .reset = intel_dp_encoder_reset,
5867         .destroy = intel_dp_encoder_destroy,
5868 };
5869
5870 enum irqreturn
5871 intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
5872 {
5873         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
5874         struct intel_dp *intel_dp = &dig_port->dp;
5875
5876         if (dig_port->base.type == INTEL_OUTPUT_EDP &&
5877             (long_hpd || !intel_pps_have_power(intel_dp))) {
5878                 /*
5879                  * vdd off can generate a long/short pulse on eDP which
5880                  * would require vdd on to handle it, and thus we
5881                  * would end up in an endless cycle of
5882                  * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
5883                  */
5884                 drm_dbg_kms(&i915->drm,
5885                             "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
5886                             long_hpd ? "long" : "short",
5887                             dig_port->base.base.base.id,
5888                             dig_port->base.base.name);
5889                 return IRQ_HANDLED;
5890         }
5891
5892         drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
5893                     dig_port->base.base.base.id,
5894                     dig_port->base.base.name,
5895                     long_hpd ? "long" : "short");
5896
5897         if (long_hpd) {
5898                 intel_dp->reset_link_params = true;
5899                 return IRQ_NONE;
5900         }
5901
5902         if (intel_dp->is_mst) {
5903                 if (!intel_dp_check_mst_status(intel_dp))
5904                         return IRQ_NONE;
5905         } else if (!intel_dp_short_pulse(intel_dp)) {
5906                 return IRQ_NONE;
5907         }
5908
5909         return IRQ_HANDLED;
5910 }
5911
5912 /* check the VBT to see whether the eDP is on another port */
5913 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
5914 {
5915         /*
5916          * eDP is not supported on g4x, so bail out early just
5917          * for a bit of extra safety in case the VBT is bonkers.
5918          */
5919         if (INTEL_GEN(dev_priv) < 5)
5920                 return false;
5921
5922         if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
5923                 return true;
5924
5925         return intel_bios_is_port_edp(dev_priv, port);
5926 }
5927
5928 static void
5929 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5930 {
5931         struct drm_i915_private *dev_priv = to_i915(connector->dev);
5932         enum port port = dp_to_dig_port(intel_dp)->base.port;
5933
5934         if (!intel_dp_is_edp(intel_dp))
5935                 drm_connector_attach_dp_subconnector_property(connector);
5936
5937         if (!IS_G4X(dev_priv) && port != PORT_A)
5938                 intel_attach_force_audio_property(connector);
5939
5940         intel_attach_broadcast_rgb_property(connector);
5941         if (HAS_GMCH(dev_priv))
5942                 drm_connector_attach_max_bpc_property(connector, 6, 10);
5943         else if (INTEL_GEN(dev_priv) >= 5)
5944                 drm_connector_attach_max_bpc_property(connector, 6, 12);
5945
5946         /* Register HDMI colorspace for case of lspcon */
5947         if (intel_bios_is_lspcon_present(dev_priv, port)) {
5948                 drm_connector_attach_content_type_property(connector);
5949                 intel_attach_hdmi_colorspace_property(connector);
5950         } else {
5951                 intel_attach_dp_colorspace_property(connector);
5952         }
5953
5954         if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
5955                 drm_object_attach_property(&connector->base,
5956                                            connector->dev->mode_config.hdr_output_metadata_property,
5957                                            0);
5958
5959         if (intel_dp_is_edp(intel_dp)) {
5960                 u32 allowed_scalers;
5961
5962                 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
5963                 if (!HAS_GMCH(dev_priv))
5964                         allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
5965
5966                 drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
5967
5968                 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
5969
5970         }
5971
5972         if (HAS_VRR(dev_priv))
5973                 drm_connector_attach_vrr_capable_property(connector);
5974 }
5975
5976 /**
5977  * intel_dp_set_drrs_state - program registers for RR switch to take effect
5978  * @dev_priv: i915 device
5979  * @crtc_state: a pointer to the active intel_crtc_state
5980  * @refresh_rate: RR to be programmed
5981  *
5982  * This function gets called when refresh rate (RR) has to be changed from
5983  * one frequency to another. Switches can be between high and low RR
5984  * supported by the panel or to any other RR based on media playback (in
5985  * this case, RR value needs to be passed from user space).
5986  *
5987  * The caller of this function needs to take a lock on dev_priv->drrs.
5988  */
5989 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
5990                                     const struct intel_crtc_state *crtc_state,
5991                                     int refresh_rate)
5992 {
5993         struct intel_dp *intel_dp = dev_priv->drrs.dp;
5994         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
5995         enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5996
5997         if (refresh_rate <= 0) {
5998                 drm_dbg_kms(&dev_priv->drm,
5999                             "Refresh rate should be positive non-zero.\n");
6000                 return;
6001         }
6002
6003         if (intel_dp == NULL) {
6004                 drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
6005                 return;
6006         }
6007
6008         if (!intel_crtc) {
6009                 drm_dbg_kms(&dev_priv->drm,
6010                             "DRRS: intel_crtc not initialized\n");
6011                 return;
6012         }
6013
6014         if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
6015                 drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
6016                 return;
6017         }
6018
6019         if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) ==
6020                         refresh_rate)
6021                 index = DRRS_LOW_RR;
6022
6023         if (index == dev_priv->drrs.refresh_rate_type) {
6024                 drm_dbg_kms(&dev_priv->drm,
6025                             "DRRS requested for previously set RR...ignoring\n");
6026                 return;
6027         }
6028
6029         if (!crtc_state->hw.active) {
6030                 drm_dbg_kms(&dev_priv->drm,
6031                             "eDP encoder disabled. CRTC not Active\n");
6032                 return;
6033         }
6034
6035         if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
6036                 switch (index) {
6037                 case DRRS_HIGH_RR:
6038                         intel_dp_set_m_n(crtc_state, M1_N1);
6039                         break;
6040                 case DRRS_LOW_RR:
6041                         intel_dp_set_m_n(crtc_state, M2_N2);
6042                         break;
6043                 case DRRS_MAX_RR:
6044                 default:
6045                         drm_err(&dev_priv->drm,
6046                                 "Unsupported refresh rate type\n");
6047                 }
6048         } else if (INTEL_GEN(dev_priv) > 6) {
6049                 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
6050                 u32 val;
6051
6052                 val = intel_de_read(dev_priv, reg);
6053                 if (index > DRRS_HIGH_RR) {
6054                         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6055                                 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6056                         else
6057                                 val |= PIPECONF_EDP_RR_MODE_SWITCH;
6058                 } else {
6059                         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6060                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6061                         else
6062                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
6063                 }
6064                 intel_de_write(dev_priv, reg, val);
6065         }
6066
6067         dev_priv->drrs.refresh_rate_type = index;
6068
6069         drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to: %dHz\n",
6070                     refresh_rate);
6071 }
6072
6073 static void
6074 intel_edp_drrs_enable_locked(struct intel_dp *intel_dp)
6075 {
6076         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6077
6078         dev_priv->drrs.busy_frontbuffer_bits = 0;
6079         dev_priv->drrs.dp = intel_dp;
6080 }
6081
6082 /**
6083  * intel_edp_drrs_enable - init drrs struct if supported
6084  * @intel_dp: DP struct
6085  * @crtc_state: A pointer to the active crtc state.
6086  *
6087  * Initializes frontbuffer_bits and drrs.dp
6088  */
6089 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
6090                            const struct intel_crtc_state *crtc_state)
6091 {
6092         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6093
6094         if (!crtc_state->has_drrs)
6095                 return;
6096
6097         drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n");
6098
6099         mutex_lock(&dev_priv->drrs.mutex);
6100
6101         if (dev_priv->drrs.dp) {
6102                 drm_warn(&dev_priv->drm, "DRRS already enabled\n");
6103                 goto unlock;
6104         }
6105
6106         intel_edp_drrs_enable_locked(intel_dp);
6107
6108 unlock:
6109         mutex_unlock(&dev_priv->drrs.mutex);
6110 }
6111
6112 static void
6113 intel_edp_drrs_disable_locked(struct intel_dp *intel_dp,
6114                               const struct intel_crtc_state *crtc_state)
6115 {
6116         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6117
6118         if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
6119                 int refresh;
6120
6121                 refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode);
6122                 intel_dp_set_drrs_state(dev_priv, crtc_state, refresh);
6123         }
6124
6125         dev_priv->drrs.dp = NULL;
6126 }
6127
6128 /**
6129  * intel_edp_drrs_disable - Disable DRRS
6130  * @intel_dp: DP struct
6131  * @old_crtc_state: Pointer to old crtc_state.
6132  *
6133  */
6134 void intel_edp_drrs_disable(struct intel_dp *intel_dp,
6135                             const struct intel_crtc_state *old_crtc_state)
6136 {
6137         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6138
6139         if (!old_crtc_state->has_drrs)
6140                 return;
6141
6142         mutex_lock(&dev_priv->drrs.mutex);
6143         if (!dev_priv->drrs.dp) {
6144                 mutex_unlock(&dev_priv->drrs.mutex);
6145                 return;
6146         }
6147
6148         intel_edp_drrs_disable_locked(intel_dp, old_crtc_state);
6149         mutex_unlock(&dev_priv->drrs.mutex);
6150
6151         cancel_delayed_work_sync(&dev_priv->drrs.work);
6152 }
6153
6154 /**
6155  * intel_edp_drrs_update - Update DRRS state
6156  * @intel_dp: Intel DP
6157  * @crtc_state: new CRTC state
6158  *
6159  * This function will update DRRS states, disabling or enabling DRRS when
6160  * executing fastsets. For full modeset, intel_edp_drrs_disable() and
6161  * intel_edp_drrs_enable() should be called instead.
6162  */
6163 void
6164 intel_edp_drrs_update(struct intel_dp *intel_dp,
6165                       const struct intel_crtc_state *crtc_state)
6166 {
6167         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6168
6169         if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
6170                 return;
6171
6172         mutex_lock(&dev_priv->drrs.mutex);
6173
6174         /* New state matches current one? */
6175         if (crtc_state->has_drrs == !!dev_priv->drrs.dp)
6176                 goto unlock;
6177
6178         if (crtc_state->has_drrs)
6179                 intel_edp_drrs_enable_locked(intel_dp);
6180         else
6181                 intel_edp_drrs_disable_locked(intel_dp, crtc_state);
6182
6183 unlock:
6184         mutex_unlock(&dev_priv->drrs.mutex);
6185 }
6186
6187 static void intel_edp_drrs_downclock_work(struct work_struct *work)
6188 {
6189         struct drm_i915_private *dev_priv =
6190                 container_of(work, typeof(*dev_priv), drrs.work.work);
6191         struct intel_dp *intel_dp;
6192
6193         mutex_lock(&dev_priv->drrs.mutex);
6194
6195         intel_dp = dev_priv->drrs.dp;
6196
6197         if (!intel_dp)
6198                 goto unlock;
6199
6200         /*
6201          * The delayed work can race with an invalidate hence we need to
6202          * recheck.
6203          */
6204
6205         if (dev_priv->drrs.busy_frontbuffer_bits)
6206                 goto unlock;
6207
6208         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
6209                 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
6210
6211                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6212                         drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode));
6213         }
6214
6215 unlock:
6216         mutex_unlock(&dev_priv->drrs.mutex);
6217 }
6218
6219 /**
6220  * intel_edp_drrs_invalidate - Disable Idleness DRRS
6221  * @dev_priv: i915 device
6222  * @frontbuffer_bits: frontbuffer plane tracking bits
6223  *
6224  * This function gets called every time rendering on the given planes starts.
6225  * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
6226  *
6227  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
6228  */
6229 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
6230                                unsigned int frontbuffer_bits)
6231 {
6232         struct intel_dp *intel_dp;
6233         struct drm_crtc *crtc;
6234         enum pipe pipe;
6235
6236         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
6237                 return;
6238
6239         cancel_delayed_work(&dev_priv->drrs.work);
6240
6241         mutex_lock(&dev_priv->drrs.mutex);
6242
6243         intel_dp = dev_priv->drrs.dp;
6244         if (!intel_dp) {
6245                 mutex_unlock(&dev_priv->drrs.mutex);
6246                 return;
6247         }
6248
6249         crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
6250         pipe = to_intel_crtc(crtc)->pipe;
6251
6252         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
6253         dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
6254
6255         /* invalidate means busy screen hence upclock */
6256         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6257                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6258                                         drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
6259
6260         mutex_unlock(&dev_priv->drrs.mutex);
6261 }
6262
6263 /**
6264  * intel_edp_drrs_flush - Restart Idleness DRRS
6265  * @dev_priv: i915 device
6266  * @frontbuffer_bits: frontbuffer plane tracking bits
6267  *
6268  * This function gets called every time rendering on the given planes has
6269  * completed or a flip on a crtc is completed. So DRRS should be upclocked
6270  * (LOW_RR -> HIGH_RR). Idleness detection should also be started again,
6271  * if no other planes are dirty.
6272  *
6273  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
6274  */
6275 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
6276                           unsigned int frontbuffer_bits)
6277 {
6278         struct intel_dp *intel_dp;
6279         struct drm_crtc *crtc;
6280         enum pipe pipe;
6281
6282         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
6283                 return;
6284
6285         cancel_delayed_work(&dev_priv->drrs.work);
6286
6287         mutex_lock(&dev_priv->drrs.mutex);
6288
6289         intel_dp = dev_priv->drrs.dp;
6290         if (!intel_dp) {
6291                 mutex_unlock(&dev_priv->drrs.mutex);
6292                 return;
6293         }
6294
6295         crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
6296         pipe = to_intel_crtc(crtc)->pipe;
6297
6298         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
6299         dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
6300
6301         /* flush means busy screen hence upclock */
6302         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
6303                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
6304                                         drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
6305
6306         /*
6307          * flush also means no more activity hence schedule downclock, if all
6308          * other fbs are quiescent too
6309          */
6310         if (!dev_priv->drrs.busy_frontbuffer_bits)
6311                 schedule_delayed_work(&dev_priv->drrs.work,
6312                                 msecs_to_jiffies(1000));
6313         mutex_unlock(&dev_priv->drrs.mutex);
6314 }
6315
6316 /**
6317  * DOC: Display Refresh Rate Switching (DRRS)
6318  *
6319  * Display Refresh Rate Switching (DRRS) is a power conservation feature
6320  * which enables switching between low and high refresh rates,
6321  * dynamically, based on the usage scenario. This feature is applicable
6322  * for internal panels.
6323  *
6324  * Indication that the panel supports DRRS is given by the panel EDID, which
6325  * would list multiple refresh rates for one resolution.
6326  *
6327  * DRRS is of 2 types - static and seamless.
6328  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
6329  * (may appear as a blink on screen) and is used in dock-undock scenarios.
6330  * Seamless DRRS involves changing RR without any visual effect to the user
6331  * and can be used during normal system usage. This is done by programming
6332  * certain registers.
6333  *
6334  * Support for static/seamless DRRS may be indicated in the VBT based on
6335  * inputs from the panel spec.
6336  *
6337  * DRRS saves power by switching to low RR based on usage scenarios.
6338  *
6339  * The implementation is based on frontbuffer tracking.  When
6340  * there is a disturbance on the screen triggered by user activity or a periodic
6341  * system activity, DRRS is disabled (RR is changed to high RR).  When there is
6342  * no movement on screen, after a timeout of 1 second, a switch to low RR is
6343  * made.
6344  *
6345  * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
6346  * and intel_edp_drrs_flush() are called.
6347  *
6348  * DRRS can be further extended to support other internal panels and also
6349  * the scenario of video playback wherein RR is set based on the rate
6350  * requested by userspace.
6351  */
6352
6353 /**
6354  * intel_dp_drrs_init - Init basic DRRS work and mutex.
6355  * @connector: eDP connector
6356  * @fixed_mode: preferred mode of panel
6357  *
6358  * This function is called only once at driver load to initialize basic
6359  * DRRS state (the work item and mutex).
6360  *
6361  * Returns:
6362  * Downclock mode if panel supports it, else return NULL.
6363  * DRRS support is determined by the presence of downclock mode (apart
6364  * from VBT setting).
6365  */
6366 static struct drm_display_mode *
6367 intel_dp_drrs_init(struct intel_connector *connector,
6368                    struct drm_display_mode *fixed_mode)
6369 {
6370         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
6371         struct drm_display_mode *downclock_mode = NULL;
6372
6373         INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
6374         mutex_init(&dev_priv->drrs.mutex);
6375
6376         if (INTEL_GEN(dev_priv) <= 6) {
6377                 drm_dbg_kms(&dev_priv->drm,
6378                             "DRRS only supported for Gen7 and above\n");
6379                 return NULL;
6380         }
6381
6382         if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
6383                 drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
6384                 return NULL;
6385         }
6386
6387         downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
6388         if (!downclock_mode) {
6389                 drm_dbg_kms(&dev_priv->drm,
6390                             "Downclock mode is not found. DRRS not supported\n");
6391                 return NULL;
6392         }
6393
6394         dev_priv->drrs.type = dev_priv->vbt.drrs_type;
6395
6396         dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
6397         drm_dbg_kms(&dev_priv->drm,
6398                     "seamless DRRS supported for eDP panel.\n");
6399         return downclock_mode;
6400 }
6401
6402 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
6403                                      struct intel_connector *intel_connector)
6404 {
6405         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6406         struct drm_device *dev = &dev_priv->drm;
6407         struct drm_connector *connector = &intel_connector->base;
6408         struct drm_display_mode *fixed_mode = NULL;
6409         struct drm_display_mode *downclock_mode = NULL;
6410         bool has_dpcd;
6411         enum pipe pipe = INVALID_PIPE;
6412         struct edid *edid;
6413
6414         if (!intel_dp_is_edp(intel_dp))
6415                 return true;
6416
6417         /*
6418          * On IBX/CPT we may get here with LVDS already registered. Since the
6419          * driver uses the only internal power sequencer available for both
6420          * eDP and LVDS, bail out early in this case to prevent interfering
6421          * with an already powered-on LVDS power sequencer.
6422          */
6423         if (intel_get_lvds_encoder(dev_priv)) {
6424                 drm_WARN_ON(dev,
6425                             !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
6426                 drm_info(&dev_priv->drm,
6427                          "LVDS was detected, not registering eDP\n");
6428
6429                 return false;
6430         }
6431
6432         intel_pps_init(intel_dp);
6433
6434         /* Cache DPCD and EDID for edp. */
6435         has_dpcd = intel_edp_init_dpcd(intel_dp);
6436
6437         if (!has_dpcd) {
6438                 /* if this fails, presume the device is a ghost */
6439                 drm_info(&dev_priv->drm,
6440                          "failed to retrieve link info, disabling eDP\n");
6441                 goto out_vdd_off;
6442         }
6443
6444         mutex_lock(&dev->mode_config.mutex);
6445         edid = drm_get_edid(connector, &intel_dp->aux.ddc);
6446         if (edid) {
6447                 if (drm_add_edid_modes(connector, edid)) {
6448                         drm_connector_update_edid_property(connector, edid);
6449                 } else {
6450                         kfree(edid);
6451                         edid = ERR_PTR(-EINVAL);
6452                 }
6453         } else {
6454                 edid = ERR_PTR(-ENOENT);
6455         }
6456         intel_connector->edid = edid;
6457
6458         fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
6459         if (fixed_mode)
6460                 downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
6461
6462         /* fallback to VBT if available for eDP */
6463         if (!fixed_mode)
6464                 fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
6465         mutex_unlock(&dev->mode_config.mutex);
6466
6467         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6468                 /*
6469                  * Figure out the current pipe for the initial backlight setup.
6470                  * If the current pipe isn't valid, try the PPS pipe, and if that
6471                  * fails just assume pipe A.
6472                  */
6473                 pipe = vlv_active_pipe(intel_dp);
6474
6475                 if (pipe != PIPE_A && pipe != PIPE_B)
6476                         pipe = intel_dp->pps.pps_pipe;
6477
6478                 if (pipe != PIPE_A && pipe != PIPE_B)
6479                         pipe = PIPE_A;
6480
6481                 drm_dbg_kms(&dev_priv->drm,
6482                             "using pipe %c for initial backlight setup\n",
6483                             pipe_name(pipe));
6484         }
6485
6486         intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
6487         intel_connector->panel.backlight.power = intel_pps_backlight_power;
6488         intel_panel_setup_backlight(connector, pipe);
6489
6490         if (fixed_mode) {
6491                 drm_connector_set_panel_orientation_with_quirk(connector,
6492                                 dev_priv->vbt.orientation,
6493                                 fixed_mode->hdisplay, fixed_mode->vdisplay);
6494         }
6495
6496         return true;
6497
6498 out_vdd_off:
6499         intel_pps_vdd_off_sync(intel_dp);
6500
6501         return false;
6502 }
6503
6504 static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
6505 {
6506         struct intel_connector *intel_connector;
6507         struct drm_connector *connector;
6508
6509         intel_connector = container_of(work, typeof(*intel_connector),
6510                                        modeset_retry_work);
6511         connector = &intel_connector->base;
6512         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
6513                       connector->name);
6514
6515         /* Grab the locks before changing connector property */
6516         mutex_lock(&connector->dev->mode_config.mutex);
6517         /* Set connector link status to BAD and send a Uevent to notify
6518          * userspace to do a modeset.
6519          */
6520         drm_connector_set_link_status_property(connector,
6521                                                DRM_MODE_LINK_STATUS_BAD);
6522         mutex_unlock(&connector->dev->mode_config.mutex);
6523         /* Send Hotplug uevent so userspace can reprobe */
6524         drm_kms_helper_hotplug_event(connector->dev);
6525 }
6526
6527 bool
6528 intel_dp_init_connector(struct intel_digital_port *dig_port,
6529                         struct intel_connector *intel_connector)
6530 {
6531         struct drm_connector *connector = &intel_connector->base;
6532         struct intel_dp *intel_dp = &dig_port->dp;
6533         struct intel_encoder *intel_encoder = &dig_port->base;
6534         struct drm_device *dev = intel_encoder->base.dev;
6535         struct drm_i915_private *dev_priv = to_i915(dev);
6536         enum port port = intel_encoder->port;
6537         enum phy phy = intel_port_to_phy(dev_priv, port);
6538         int type;
6539
6540         /* Initialize the work for modeset in case of link train failure */
6541         INIT_WORK(&intel_connector->modeset_retry_work,
6542                   intel_dp_modeset_retry_work_fn);
6543
6544         if (drm_WARN(dev, dig_port->max_lanes < 1,
6545                      "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
6546                      dig_port->max_lanes, intel_encoder->base.base.id,
6547                      intel_encoder->base.name))
6548                 return false;
6549
6550         intel_dp_set_source_rates(intel_dp);
6551
6552         intel_dp->reset_link_params = true;
6553         intel_dp->pps.pps_pipe = INVALID_PIPE;
6554         intel_dp->pps.active_pipe = INVALID_PIPE;
6555
6556         /* Preserve the current hw state. */
6557         intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
6558         intel_dp->attached_connector = intel_connector;
6559
6560         if (intel_dp_is_port_edp(dev_priv, port)) {
6561                 /*
6562                  * Currently we don't support eDP on TypeC ports, although in
6563                  * theory it could work on TypeC legacy ports.
6564                  */
6565                 drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
6566                 type = DRM_MODE_CONNECTOR_eDP;
6567         } else {
6568                 type = DRM_MODE_CONNECTOR_DisplayPort;
6569         }
6570
6571         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6572                 intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
6573
6574         /*
6575          * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
6576          * for DP the encoder type can be set by the caller to
6577          * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
6578          */
6579         if (type == DRM_MODE_CONNECTOR_eDP)
6580                 intel_encoder->type = INTEL_OUTPUT_EDP;
6581
6582         /* eDP only on port B and/or C on vlv/chv */
6583         if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
6584                               IS_CHERRYVIEW(dev_priv)) &&
6585                         intel_dp_is_edp(intel_dp) &&
6586                         port != PORT_B && port != PORT_C))
6587                 return false;
6588
6589         drm_dbg_kms(&dev_priv->drm,
6590                     "Adding %s connector on [ENCODER:%d:%s]\n",
6591                     type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
6592                     intel_encoder->base.base.id, intel_encoder->base.name);
6593
6594         drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
6595         drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
6596
6597         if (!HAS_GMCH(dev_priv))
6598                 connector->interlace_allowed = true;
6599         connector->doublescan_allowed = 0;
6600
6601         intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
6602
6603         intel_dp_aux_init(intel_dp);
6604
6605         intel_connector_attach_encoder(intel_connector, intel_encoder);
6606
6607         if (HAS_DDI(dev_priv))
6608                 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
6609         else
6610                 intel_connector->get_hw_state = intel_connector_get_hw_state;
6611
6612         /* init MST on ports that can support it */
6613         intel_dp_mst_encoder_init(dig_port,
6614                                   intel_connector->base.base.id);
6615
6616         if (!intel_edp_init_connector(intel_dp, intel_connector)) {
6617                 intel_dp_aux_fini(intel_dp);
6618                 intel_dp_mst_encoder_cleanup(dig_port);
6619                 goto fail;
6620         }
6621
6622         intel_dp_add_properties(intel_dp, connector);
6623
6624         if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
6625                 int ret = intel_dp_init_hdcp(dig_port, intel_connector);
6626                 if (ret)
6627                         drm_dbg_kms(&dev_priv->drm,
6628                                     "HDCP init failed, skipping.\n");
6629         }
6630
6631         /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
6632          * 0xd.  Failure to do so will result in spurious interrupts being
6633          * generated on the port when a cable is not attached.
6634          */
6635         if (IS_G45(dev_priv)) {
6636                 u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
6637                 intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
6638                                (temp & ~0xf) | 0xd);
6639         }
6640
6641         intel_dp->frl.is_trained = false;
6642         intel_dp->frl.trained_rate_gbps = 0;
6643
6644         return true;
6645
6646 fail:
6647         drm_connector_cleanup(connector);
6648
6649         return false;
6650 }
6651
6652 bool intel_dp_init(struct drm_i915_private *dev_priv,
6653                    i915_reg_t output_reg,
6654                    enum port port)
6655 {
6656         struct intel_digital_port *dig_port;
6657         struct intel_encoder *intel_encoder;
6658         struct drm_encoder *encoder;
6659         struct intel_connector *intel_connector;
6660
6661         dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
6662         if (!dig_port)
6663                 return false;
6664
6665         intel_connector = intel_connector_alloc();
6666         if (!intel_connector)
6667                 goto err_connector_alloc;
6668
6669         intel_encoder = &dig_port->base;
6670         encoder = &intel_encoder->base;
6671
6672         mutex_init(&dig_port->hdcp_mutex);
6673
6674         if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
6675                              &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
6676                              "DP %c", port_name(port)))
6677                 goto err_encoder_init;
6678
6679         intel_encoder->hotplug = intel_dp_hotplug;
6680         intel_encoder->compute_config = intel_dp_compute_config;
6681         intel_encoder->get_hw_state = intel_dp_get_hw_state;
6682         intel_encoder->get_config = intel_dp_get_config;
6683         intel_encoder->sync_state = intel_dp_sync_state;
6684         intel_encoder->initial_fastset_check = intel_dp_initial_fastset_check;
6685         intel_encoder->update_pipe = intel_panel_update_backlight;
6686         intel_encoder->suspend = intel_dp_encoder_suspend;
6687         intel_encoder->shutdown = intel_dp_encoder_shutdown;
        if (IS_CHERRYVIEW(dev_priv)) {
                intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
                intel_encoder->pre_enable = chv_pre_enable_dp;
                intel_encoder->enable = vlv_enable_dp;
                intel_encoder->disable = vlv_disable_dp;
                intel_encoder->post_disable = chv_post_disable_dp;
                intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
        } else if (IS_VALLEYVIEW(dev_priv)) {
                intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
                intel_encoder->pre_enable = vlv_pre_enable_dp;
                intel_encoder->enable = vlv_enable_dp;
                intel_encoder->disable = vlv_disable_dp;
                intel_encoder->post_disable = vlv_post_disable_dp;
        } else {
                intel_encoder->pre_enable = g4x_pre_enable_dp;
                intel_encoder->enable = g4x_enable_dp;
                intel_encoder->disable = g4x_disable_dp;
                intel_encoder->post_disable = g4x_post_disable_dp;
        }

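        /*
         * IVB CPU eDP and ports behind a CPT/PPT PCH use the CPT variant of
         * the link training bits in the port register; everything else uses
         * the original g4x layout.
         */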
        if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
            (HAS_PCH_CPT(dev_priv) && port != PORT_A))
                dig_port->dp.set_link_train = cpt_set_link_train;
        else
                dig_port->dp.set_link_train = g4x_set_link_train;

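        /* Voltage swing / pre-emphasis programming is likewise platform specific. */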
        if (IS_CHERRYVIEW(dev_priv))
                dig_port->dp.set_signal_levels = chv_set_signal_levels;
        else if (IS_VALLEYVIEW(dev_priv))
                dig_port->dp.set_signal_levels = vlv_set_signal_levels;
        else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
                dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
        else if (IS_GEN(dev_priv, 6) && port == PORT_A)
                dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
        else
                dig_port->dp.set_signal_levels = g4x_set_signal_levels;

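        /*
         * The maximum supported voltage swing and pre-emphasis levels also
         * depend on the platform and port.
         */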
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
            (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
                dig_port->dp.preemph_max = intel_dp_preemph_max_3;
                dig_port->dp.voltage_max = intel_dp_voltage_max_3;
        } else {
                dig_port->dp.preemph_max = intel_dp_preemph_max_2;
                dig_port->dp.voltage_max = intel_dp_voltage_max_2;
        }

        dig_port->dp.output_reg = output_reg;
        dig_port->max_lanes = 4;

        intel_encoder->type = INTEL_OUTPUT_DP;
        intel_encoder->power_domain = intel_port_to_power_domain(port);
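        /*
         * On CHV, port D can only be driven by pipe C, while ports B and C
         * are limited to pipes A and B. Other platforms can route any pipe
         * to this port.
         */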
        if (IS_CHERRYVIEW(dev_priv)) {
                if (port == PORT_D)
                        intel_encoder->pipe_mask = BIT(PIPE_C);
                else
                        intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
        } else {
                intel_encoder->pipe_mask = ~0;
        }
        intel_encoder->cloneable = 0;
        intel_encoder->port = port;
        intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);

        dig_port->hpd_pulse = intel_dp_hpd_pulse;

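        /*
         * Live status (is a sink physically connected?) is read from
         * different registers depending on whether the port lives in the
         * GMCH, the CPU (port A) or the PCH.
         */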
        if (HAS_GMCH(dev_priv)) {
                if (IS_GM45(dev_priv))
                        dig_port->connected = gm45_digital_port_connected;
                else
                        dig_port->connected = g4x_digital_port_connected;
        } else {
                if (port == PORT_A)
                        dig_port->connected = ilk_digital_port_connected;
                else
                        dig_port->connected = ibx_digital_port_connected;
        }

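        /* Port A is eDP on these platforms; infoframe/SDP support is only
         * set up for the other ports.
         */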
        if (port != PORT_A)
                intel_infoframe_init(dig_port);

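        /* The AUX channel to use for this port comes from the VBT. */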
        dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
        if (!intel_dp_init_connector(dig_port, intel_connector))
                goto err_init_connector;

        return true;

err_init_connector:
        drm_encoder_cleanup(encoder);
err_encoder_init:
        kfree(intel_connector);
err_connector_alloc:
        kfree(dig_port);
        return false;
}

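/*
 * Suspend the MST topology managers of all DDI DP encoders that currently
 * have an active MST topology, ahead of system suspend.
 */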
void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                struct intel_dp *intel_dp;

                if (encoder->type != INTEL_OUTPUT_DDI)
                        continue;

                intel_dp = enc_to_intel_dp(encoder);

                if (!intel_dp->can_mst)
                        continue;

                if (intel_dp->is_mst)
                        drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
        }
}

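/*
 * Resume the MST topology managers of all MST-capable DDI DP encoders after
 * system resume.
 */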
void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                struct intel_dp *intel_dp;
                int ret;

                if (encoder->type != INTEL_OUTPUT_DDI)
                        continue;

                intel_dp = enc_to_intel_dp(encoder);

                if (!intel_dp->can_mst)
                        continue;

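                /*
                 * Re-sync the topology state with the sink; if that fails
                 * (e.g. the sink was unplugged or replaced while suspended),
                 * drop out of MST mode so a later hotplug can re-detect it.
                 */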
                ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
                                                     true);
                if (ret) {
                        intel_dp->is_mst = false;
                        drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
                                                        false);
                }
        }
}