684166dbc9a337d6f80277edc435c1d43060ce53
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / display / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/export.h>
29 #include <linux/i2c.h>
30 #include <linux/notifier.h>
31 #include <linux/reboot.h>
32 #include <linux/slab.h>
33 #include <linux/types.h>
34
35 #include <asm/byteorder.h>
36
37 #include <drm/drm_atomic_helper.h>
38 #include <drm/drm_crtc.h>
39 #include <drm/drm_dp_helper.h>
40 #include <drm/drm_edid.h>
41 #include <drm/drm_probe_helper.h>
42
43 #include "i915_debugfs.h"
44 #include "i915_drv.h"
45 #include "i915_trace.h"
46 #include "intel_atomic.h"
47 #include "intel_audio.h"
48 #include "intel_connector.h"
49 #include "intel_ddi.h"
50 #include "intel_display_types.h"
51 #include "intel_dp.h"
52 #include "intel_dp_link_training.h"
53 #include "intel_dp_mst.h"
54 #include "intel_dpio_phy.h"
55 #include "intel_fifo_underrun.h"
56 #include "intel_hdcp.h"
57 #include "intel_hdmi.h"
58 #include "intel_hotplug.h"
59 #include "intel_lspcon.h"
60 #include "intel_lvds.h"
61 #include "intel_panel.h"
62 #include "intel_psr.h"
63 #include "intel_sideband.h"
64 #include "intel_tc.h"
65 #include "intel_vdsc.h"
66
/* Length in bytes of a DPRX Event Status Indicator (ESI) read */
#define DP_DPRX_ESI_LEN 14

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = 1/(0.972261) */
#define DP_DSC_FEC_OVERHEAD_FACTOR		972261

/* Compliance test status bits  */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
82
/* Pairs a DP link rate with the DPLL divider settings that produce it. */
struct dp_link_dpll {
	int clock;		/* link rate in kHz (e.g. 162000, 270000) */
	struct dpll dpll;	/* divider values (p1/p2/n/m1/m2) for that rate */
};
87
/* DPLL settings for the 1.62 (RBR) and 2.7 (HBR) GHz link rates on g4x */
static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* DPLL settings for the same two link rates on PCH (ILK+) platforms */
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* DPLL settings for VLV */
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4 that have  more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};

/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With Single pipe configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};
132
133 /**
134  * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
135  * @intel_dp: DP struct
136  *
137  * If a CPU or PCH DP output is attached to an eDP panel, this function
138  * will return true, and false otherwise.
139  */
140 bool intel_dp_is_edp(struct intel_dp *intel_dp)
141 {
142         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
143
144         return dig_port->base.type == INTEL_OUTPUT_EDP;
145 }
146
147 static void intel_dp_link_down(struct intel_encoder *encoder,
148                                const struct intel_crtc_state *old_crtc_state);
149 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
150 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
151 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
152                                            const struct intel_crtc_state *crtc_state);
153 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
154                                       enum pipe pipe);
155 static void intel_dp_unset_edid(struct intel_dp *intel_dp);
156
157 /* update sink rates from dpcd */
158 static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
159 {
160         static const int dp_rates[] = {
161                 162000, 270000, 540000, 810000
162         };
163         int i, max_rate;
164
165         if (drm_dp_has_quirk(&intel_dp->desc, 0,
166                              DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
167                 /* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
168                 static const int quirk_rates[] = { 162000, 270000, 324000 };
169
170                 memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
171                 intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);
172
173                 return;
174         }
175
176         max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
177
178         for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
179                 if (dp_rates[i] > max_rate)
180                         break;
181                 intel_dp->sink_rates[i] = dp_rates[i];
182         }
183
184         intel_dp->num_sink_rates = i;
185 }
186
/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int idx;

	/*
	 * Scan from the highest entry down; the first one that does not
	 * exceed max_rate bounds the usable prefix of the array.
	 */
	for (idx = len - 1; idx >= 0; idx--) {
		if (rates[idx] <= max_rate)
			return idx + 1;
	}

	return 0;
}
200
/* Get length of common rates array potentially limited by max_rate. */
/* Convenience wrapper over intel_dp_rate_limit_len() for common_rates[]. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}
208
/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	/* common_rates[] is sorted ascending, so the last entry is the max. */
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}
214
215 /* Theoretical max between source and sink */
216 static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
217 {
218         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
219         int source_max = dig_port->max_lanes;
220         int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
221         int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
222
223         return min3(source_max, sink_max, fia_max);
224 }
225
int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	/*
	 * May be lower than the theoretical max if link training has
	 * fallen back (see intel_dp_get_link_train_fallback_values()).
	 */
	return intel_dp->max_link_lane_count;
}
230
int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/*
	 * pixel_clock is in kHz; divide bpp by 8 for bit to Byte
	 * conversion, rounding up so a partial byte still costs one.
	 */
	return (pixel_clock * bpp + 7) / 8;
}
237
int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/*
	 * max_link_clock is the link symbol clock (LS_Clk) in kHz, not the
	 * link rate usually quoted in Gbps.  Each lane carries 8 data bits
	 * per LS_Clk, so the channel-coding overhead applied in the PHY
	 * does not need to be accounted for here.
	 */
	int total_rate = max_link_clock * max_lanes;

	return total_rate;
}
249
250 static int
251 intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
252 {
253         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
254         struct intel_encoder *encoder = &dig_port->base;
255         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
256         int max_dotclk = dev_priv->max_dotclk_freq;
257         int ds_max_dotclk;
258
259         int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
260
261         if (type != DP_DS_PORT_TYPE_VGA)
262                 return max_dotclk;
263
264         ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
265                                                     intel_dp->downstream_ports);
266
267         if (ds_max_dotclk != 0)
268                 max_dotclk = min(max_dotclk, ds_max_dotclk);
269
270         return max_dotclk;
271 }
272
273 static int cnl_max_source_rate(struct intel_dp *intel_dp)
274 {
275         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
276         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
277         enum port port = dig_port->base.port;
278
279         u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
280
281         /* Low voltage SKUs are limited to max of 5.4G */
282         if (voltage == VOLTAGE_INFO_0_85V)
283                 return 540000;
284
285         /* For this SKU 8.1G is supported in all ports */
286         if (IS_CNL_WITH_PORT_F(dev_priv))
287                 return 810000;
288
289         /* For other SKUs, max rate on ports A and D is 5.4G */
290         if (port == PORT_A || port == PORT_D)
291                 return 540000;
292
293         return 810000;
294 }
295
296 static int icl_max_source_rate(struct intel_dp *intel_dp)
297 {
298         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
299         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
300         enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
301
302         if (intel_phy_is_combo(dev_priv, phy) &&
303             !IS_ELKHARTLAKE(dev_priv) &&
304             !intel_dp_is_edp(intel_dp))
305                 return 540000;
306
307         return 810000;
308 }
309
/*
 * Select the source link-rate table for this platform and store it in
 * intel_dp->source_rates / num_source_rates, truncated to any per-SKU
 * or VBT-imposed maximum rate.  Must run only once per encoder.
 */
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	/* Pick the rate table for the platform generation. */
	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		/* Gen10 (CNL) and gen11+ (ICL) apply per-SKU/PHY rate caps. */
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	/* The VBT may cap the rate further; honor the tighter of the two. */
	vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	/* Truncate the table to entries not exceeding the effective cap. */
	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}
373
374 static int intersect_rates(const int *source_rates, int source_len,
375                            const int *sink_rates, int sink_len,
376                            int *common_rates)
377 {
378         int i = 0, j = 0, k = 0;
379
380         while (i < source_len && j < sink_len) {
381                 if (source_rates[i] == sink_rates[j]) {
382                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
383                                 return k;
384                         common_rates[k] = source_rates[i];
385                         ++k;
386                         ++i;
387                         ++j;
388                 } else if (source_rates[i] < sink_rates[j]) {
389                         ++i;
390                 } else {
391                         ++j;
392                 }
393         }
394         return k;
395 }
396
/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int idx = 0;

	while (idx < len) {
		if (rates[idx] == rate)
			return idx;
		idx++;
	}

	return -1;
}
408
409 static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
410 {
411         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
412
413         drm_WARN_ON(&i915->drm,
414                     !intel_dp->num_source_rates || !intel_dp->num_sink_rates);
415
416         intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
417                                                      intel_dp->num_source_rates,
418                                                      intel_dp->sink_rates,
419                                                      intel_dp->num_sink_rates,
420                                                      intel_dp->common_rates);
421
422         /* Paranoia, there should always be something in common. */
423         if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
424                 intel_dp->common_rates[0] = 162000;
425                 intel_dp->num_common_rates = 1;
426         }
427 }
428
429 static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
430                                        u8 lane_count)
431 {
432         /*
433          * FIXME: we need to synchronize the current link parameters with
434          * hardware readout. Currently fast link training doesn't work on
435          * boot-up.
436          */
437         if (link_rate == 0 ||
438             link_rate > intel_dp->max_link_rate)
439                 return false;
440
441         if (lane_count == 0 ||
442             lane_count > intel_dp_max_lane_count(intel_dp))
443                 return false;
444
445         return true;
446 }
447
448 static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
449                                                      int link_rate,
450                                                      u8 lane_count)
451 {
452         const struct drm_display_mode *fixed_mode =
453                 intel_dp->attached_connector->panel.fixed_mode;
454         int mode_rate, max_rate;
455
456         mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
457         max_rate = intel_dp_max_data_rate(link_rate, lane_count);
458         if (mode_rate > max_rate)
459                 return false;
460
461         return true;
462 }
463
/*
 * Pick reduced link parameters after a failed link training attempt:
 * first step down to the next lower common link rate at the same lane
 * count, then halve the lane count at the max common rate.  Returns 0
 * when a retry is possible (possibly with unchanged parameters on eDP)
 * and -1 when no further fallback exists.
 */
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int index;

	/*
	 * TODO: Enable fallback on MST links once MST link compute can handle
	 * the fallback params.
	 */
	if (intel_dp->is_mst) {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		/* Try the next lower link rate, keeping the lane count. */
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		/* Already at the lowest rate; halve the lane count instead. */
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		/* Nothing left to reduce. */
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}
511
512 u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
513 {
514         return div_u64(mul_u32_u32(mode_clock, 1000000U),
515                        DP_DSC_FEC_OVERHEAD_FACTOR);
516 }
517
/* Small joiner RAM size in bits: 7680 bytes on gen11+, 6144 before. */
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	return (INTEL_GEN(i915) >= 11 ? 7680 : 6144) * 8;
}
526
/*
 * Compute the maximum DSC compressed output bpp for the given link
 * configuration and mode, clamped by both the available link bandwidth
 * and the small joiner RAM limit, then rounded down to the nearest
 * valid VESA DSC bpp.  Returns the bpp in U6.4 fixed point, or 0 if
 * even the smallest valid bpp cannot be carried.
 */
static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
				       u32 link_clock, u32 lane_count,
				       u32 mode_clock, u32 mode_hdisplay)
{
	u32 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
	 * for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8) /
			 intel_dp_mode_to_fec_clock(mode_clock);
	drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
		mode_hdisplay;
	drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
		    max_bpp_small_joiner_ram);

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	/* i now indexes the largest valid bpp not exceeding the computed max. */
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
	 * fractional part is 0
	 */
	return bits_per_pixel << 4;
}
576
577 static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
578                                        int mode_clock, int mode_hdisplay)
579 {
580         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
581         u8 min_slice_count, i;
582         int max_slice_width;
583
584         if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
585                 min_slice_count = DIV_ROUND_UP(mode_clock,
586                                                DP_DSC_MAX_ENC_THROUGHPUT_0);
587         else
588                 min_slice_count = DIV_ROUND_UP(mode_clock,
589                                                DP_DSC_MAX_ENC_THROUGHPUT_1);
590
591         max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
592         if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
593                 drm_dbg_kms(&i915->drm,
594                             "Unsupported slice width %d by DP DSC Sink device\n",
595                             max_slice_width);
596                 return 0;
597         }
598         /* Also take into account max slice width */
599         min_slice_count = min_t(u8, min_slice_count,
600                                 DIV_ROUND_UP(mode_hdisplay,
601                                              max_slice_width));
602
603         /* Find the closest match to the valid slice count values */
604         for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
605                 if (valid_dsc_slicecount[i] >
606                     drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
607                                                     false))
608                         break;
609                 if (min_slice_count  <= valid_dsc_slicecount[i])
610                         return valid_dsc_slicecount[i];
611         }
612
613         drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
614                     min_slice_count);
615         return 0;
616 }
617
618 static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
619                                   int hdisplay)
620 {
621         /*
622          * Older platforms don't like hdisplay==4096 with DP.
623          *
624          * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
625          * and frame counter increment), but we don't get vblank interrupts,
626          * and the pipe underruns immediately. The link also doesn't seem
627          * to get trained properly.
628          *
629          * On CHV the vblank interrupts don't seem to disappear but
630          * otherwise the symptoms are similar.
631          *
632          * TODO: confirm the behaviour on HSW+
633          */
634         return hdisplay == 4096 && !HAS_DDI(dev_priv);
635 }
636
/*
 * Validate a mode against panel limits, link bandwidth (with possible
 * DSC compression), platform dotclock and plane size limits.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	/* eDP: the mode must fit within the fixed panel timings. */
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	/* 18 bpp: presumably the 6 bpc minimum, so valid modes aren't rejected early — TODO confirm */
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay);
		}
	}

	/* A mode over the link budget is still OK if DSC can compress it. */
	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return intel_mode_valid_max_plane_size(dev_priv, mode);
}
713
714 u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
715 {
716         int i;
717         u32 v = 0;
718
719         if (src_bytes > 4)
720                 src_bytes = 4;
721         for (i = 0; i < src_bytes; i++)
722                 v |= ((u32)src[i]) << ((3 - i) * 8);
723         return v;
724 }
725
726 static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
727 {
728         int i;
729         if (dst_bytes > 4)
730                 dst_bytes = 4;
731         for (i = 0; i < dst_bytes; i++)
732                 dst[i] = src >> ((3-i) * 8);
733 }
734
735 static void
736 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
737 static void
738 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
739                                               bool force_disable_vdd);
740 static void
741 intel_dp_pps_init(struct intel_dp *intel_dp);
742
/*
 * Grab the PPS mutex along with an AUX power domain reference; the
 * returned wakeref must be handed back via pps_unlock().
 */
static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	/* Power reference is taken before the mutex; pps_unlock() reverses. */
	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}
760
/* Release the PPS mutex and the AUX power reference taken by pps_lock(). */
static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	/* Returns 0 so the with_pps_lock() loop terminates after one pass. */
	return 0;
}
772
/*
 * Run the loop body once with the PPS mutex held and an AUX power
 * reference: pps_unlock() returns 0, ending the loop (assumes
 * pps_lock() returns a non-zero wakeref — TODO confirm).
 */
#define with_pps_lock(dp, wf) \
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
775
/*
 * Force the power sequencer on @intel_dp->pps_pipe to lock onto this
 * port by briefly enabling and disabling the DP port with a minimal
 * configuration. Requires the port to be currently disabled; the pipe's
 * DPLL is temporarily forced on if it isn't running already.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	/* The kick only works while the port is disabled. */
	if (drm_WARN(&dev_priv->drm,
		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
		     pipe_name(pipe), dig_port->base.base.base.id,
		     dig_port->base.base.name))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	/* CHV and VLV use different pipe-select encodings. */
	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So temporarily enable it if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			drm_err(&dev_priv->drm,
				"Failed to force on pll for pipe %c!\n",
				pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	/* Undo the temporary PLL enable / powergate override. */
	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}
853
854 static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
855 {
856         struct intel_encoder *encoder;
857         unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
858
859         /*
860          * We don't have power sequencer currently.
861          * Pick one that's not used by other ports.
862          */
863         for_each_intel_dp(&dev_priv->drm, encoder) {
864                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
865
866                 if (encoder->type == INTEL_OUTPUT_EDP) {
867                         drm_WARN_ON(&dev_priv->drm,
868                                     intel_dp->active_pipe != INVALID_PIPE &&
869                                     intel_dp->active_pipe !=
870                                     intel_dp->pps_pipe);
871
872                         if (intel_dp->pps_pipe != INVALID_PIPE)
873                                 pipes &= ~(1 << intel_dp->pps_pipe);
874                 } else {
875                         drm_WARN_ON(&dev_priv->drm,
876                                     intel_dp->pps_pipe != INVALID_PIPE);
877
878                         if (intel_dp->active_pipe != INVALID_PIPE)
879                                 pipes &= ~(1 << intel_dp->active_pipe);
880                 }
881         }
882
883         if (pipes == 0)
884                 return INVALID_PIPE;
885
886         return ffs(pipes) - 1;
887 }
888
/*
 * Return the pipe whose power sequencer drives this eDP port, assigning
 * (and kicking) a free one first if none is bound yet. Must be called
 * with pps_mutex held.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
		    intel_dp->active_pipe != intel_dp->pps_pipe);

	/* Fast path: a PPS is already bound to this port. */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
		pipe = PIPE_A;

	/* Take the PPS away from whoever held it, then claim it. */
	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe),
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
937
938 static int
939 bxt_power_sequencer_idx(struct intel_dp *intel_dp)
940 {
941         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
942         int backlight_controller = dev_priv->vbt.backlight.controller;
943
944         lockdep_assert_held(&dev_priv->pps_mutex);
945
946         /* We should never land here with regular DP ports */
947         drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
948
949         if (!intel_dp->pps_reset)
950                 return backlight_controller;
951
952         intel_dp->pps_reset = false;
953
954         /*
955          * Only the HW needs to be reprogrammed, the SW state is fixed and
956          * has been setup during connector init.
957          */
958         intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
959
960         return backlight_controller;
961 }
962
/* Predicate used when scanning pipes for an already-active PPS instance. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);
965
966 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
967                                enum pipe pipe)
968 {
969         return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
970 }
971
972 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
973                                 enum pipe pipe)
974 {
975         return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
976 }
977
/* Pipe check that accepts any pipe; used as a last-resort fallback. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
983
984 static enum pipe
985 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
986                      enum port port,
987                      vlv_pipe_check pipe_check)
988 {
989         enum pipe pipe;
990
991         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
992                 u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
993                         PANEL_PORT_SELECT_MASK;
994
995                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
996                         continue;
997
998                 if (!pipe_check(dev_priv, pipe))
999                         continue;
1000
1001                 return pipe;
1002         }
1003
1004         return INVALID_PIPE;
1005 }
1006
/*
 * Bind this eDP port to whichever PPS the BIOS already set up for it,
 * preferring (in order) a pipe with panel power on, then one with VDD
 * forced on, then any pipe selecting this port. Leaves pps_pipe as
 * INVALID_PIPE when nothing matched so vlv_power_sequencer_pipe() can
 * pick one later. Must be called with pps_mutex held.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum port port = dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		drm_dbg_kms(&dev_priv->drm,
			    "no initial power sequencer for [ENCODER:%d:%s]\n",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}
1047
/*
 * Invalidate the PPS software state for all eDP ports after the
 * hardware has lost it (e.g. a power well was turned off). On GEN9 LP
 * the registers are flagged for reprogramming; on VLV/CHV the pipe
 * binding itself is dropped. Only valid on VLV/CHV/GEN9 LP.
 */
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (drm_WARN_ON(&dev_priv->drm,
			!(IS_VALLEYVIEW(dev_priv) ||
			  IS_CHERRYVIEW(dev_priv) ||
			  IS_GEN9_LP(dev_priv))))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN_ON(&dev_priv->drm,
			    intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}
1083
/* Per-platform set of panel power sequencer register addresses. */
struct pps_registers {
	i915_reg_t pp_ctrl;	/* panel power control */
	i915_reg_t pp_stat;	/* panel power status */
	i915_reg_t pp_on;	/* power-on delays */
	i915_reg_t pp_off;	/* power-off delays */
	i915_reg_t pp_div;	/* divisor/cycle delay; INVALID_MMIO_REG where N/A */
};
1091
1092 static void intel_pps_get_registers(struct intel_dp *intel_dp,
1093                                     struct pps_registers *regs)
1094 {
1095         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1096         int pps_idx = 0;
1097
1098         memset(regs, 0, sizeof(*regs));
1099
1100         if (IS_GEN9_LP(dev_priv))
1101                 pps_idx = bxt_power_sequencer_idx(intel_dp);
1102         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1103                 pps_idx = vlv_power_sequencer_pipe(intel_dp);
1104
1105         regs->pp_ctrl = PP_CONTROL(pps_idx);
1106         regs->pp_stat = PP_STATUS(pps_idx);
1107         regs->pp_on = PP_ON_DELAYS(pps_idx);
1108         regs->pp_off = PP_OFF_DELAYS(pps_idx);
1109
1110         /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
1111         if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
1112                 regs->pp_div = INVALID_MMIO_REG;
1113         else
1114                 regs->pp_div = PP_DIVISOR(pps_idx);
1115 }
1116
1117 static i915_reg_t
1118 _pp_ctrl_reg(struct intel_dp *intel_dp)
1119 {
1120         struct pps_registers regs;
1121
1122         intel_pps_get_registers(intel_dp, &regs);
1123
1124         return regs.pp_ctrl;
1125 }
1126
1127 static i915_reg_t
1128 _pp_stat_reg(struct intel_dp *intel_dp)
1129 {
1130         struct pps_registers regs;
1131
1132         intel_pps_get_registers(intel_dp, &regs);
1133
1134         return regs.pp_stat;
1135 }
1136
/*
 * Reboot notifier handler to shut down panel power to guarantee T12
 * timing. Only applicable when panel PM state is not to be tracked:
 * on restart it forces the maximum power cycle delay, unlocks the PPS
 * registers, and waits out the panel power cycle delay.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/* Only eDP panels on a system restart need this treatment. */
	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;
			u32 pp_div;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg  = PP_DIVISOR(pipe);
			pp_div = intel_de_read(dev_priv, pp_div_reg);
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* 0x1F write to PP_DIV_REG sets max cycle delay */
			intel_de_write(dev_priv, pp_div_reg, pp_div | 0x1F);
			intel_de_write(dev_priv, pp_ctrl_reg,
				       PANEL_UNLOCK_REGS);
			msleep(intel_dp->panel_power_cycle_delay);
		}
	}

	return 0;
}
1171
1172 static bool edp_have_panel_power(struct intel_dp *intel_dp)
1173 {
1174         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1175
1176         lockdep_assert_held(&dev_priv->pps_mutex);
1177
1178         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1179             intel_dp->pps_pipe == INVALID_PIPE)
1180                 return false;
1181
1182         return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
1183 }
1184
1185 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
1186 {
1187         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1188
1189         lockdep_assert_held(&dev_priv->pps_mutex);
1190
1191         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1192             intel_dp->pps_pipe == INVALID_PIPE)
1193                 return false;
1194
1195         return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
1196 }
1197
1198 static void
1199 intel_dp_check_edp(struct intel_dp *intel_dp)
1200 {
1201         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1202
1203         if (!intel_dp_is_edp(intel_dp))
1204                 return;
1205
1206         if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
1207                 drm_WARN(&dev_priv->drm, 1,
1208                          "eDP powered off while attempting aux channel communication.\n");
1209                 drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
1210                             intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
1211                             intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
1212         }
1213 }
1214
/*
 * Wait for the current AUX transaction to finish (SEND_BUSY to clear),
 * up to 10ms, and return the final AUX_CH_CTL value. Logs an error on
 * timeout but still returns the last status read.
 */
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	bool done;

/* Condition macro: re-reads AUX_CH_CTL into status on every evaluation. */
#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(timeout_ms));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		drm_err(&i915->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);
#undef C

	return status;
}
1239
1240 static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1241 {
1242         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1243
1244         if (index)
1245                 return 0;
1246
1247         /*
1248          * The clock divider is based off the hrawclk, and would like to run at
1249          * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
1250          */
1251         return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
1252 }
1253
1254 static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1255 {
1256         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1257         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1258         u32 freq;
1259
1260         if (index)
1261                 return 0;
1262
1263         /*
1264          * The clock divider is based off the cdclk or PCH rawclk, and would
1265          * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
1266          * divide by 2000 and use that
1267          */
1268         if (dig_port->aux_ch == AUX_CH_A)
1269                 freq = dev_priv->cdclk.hw.cdclk;
1270         else
1271                 freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
1272         return DIV_ROUND_CLOSEST(freq, 2000);
1273 }
1274
1275 static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1276 {
1277         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1278         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1279
1280         if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
1281                 /* Workaround for non-ULT HSW */
1282                 switch (index) {
1283                 case 0: return 63;
1284                 case 1: return 72;
1285                 default: return 0;
1286                 }
1287         }
1288
1289         return ilk_get_aux_clock_divider(intel_dp, index);
1290 }
1291
1292 static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1293 {
1294         /*
1295          * SKL doesn't need us to program the AUX clock divider (Hardware will
1296          * derive the clock from CDCLK automatically). We still implement the
1297          * get_aux_clock_divider vfunc to plug-in into the existing code.
1298          */
1299         return index ? 0 : 1;
1300 }
1301
1302 static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
1303                                 int send_bytes,
1304                                 u32 aux_clock_divider)
1305 {
1306         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1307         struct drm_i915_private *dev_priv =
1308                         to_i915(dig_port->base.base.dev);
1309         u32 precharge, timeout;
1310
1311         if (IS_GEN(dev_priv, 6))
1312                 precharge = 3;
1313         else
1314                 precharge = 5;
1315
1316         if (IS_BROADWELL(dev_priv))
1317                 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
1318         else
1319                 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
1320
1321         return DP_AUX_CH_CTL_SEND_BUSY |
1322                DP_AUX_CH_CTL_DONE |
1323                DP_AUX_CH_CTL_INTERRUPT |
1324                DP_AUX_CH_CTL_TIME_OUT_ERROR |
1325                timeout |
1326                DP_AUX_CH_CTL_RECEIVE_ERROR |
1327                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1328                (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
1329                (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
1330 }
1331
1332 static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
1333                                 int send_bytes,
1334                                 u32 unused)
1335 {
1336         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1337         struct drm_i915_private *i915 =
1338                         to_i915(dig_port->base.base.dev);
1339         enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
1340         u32 ret;
1341
1342         ret = DP_AUX_CH_CTL_SEND_BUSY |
1343               DP_AUX_CH_CTL_DONE |
1344               DP_AUX_CH_CTL_INTERRUPT |
1345               DP_AUX_CH_CTL_TIME_OUT_ERROR |
1346               DP_AUX_CH_CTL_TIME_OUT_MAX |
1347               DP_AUX_CH_CTL_RECEIVE_ERROR |
1348               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1349               DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
1350               DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
1351
1352         if (intel_phy_is_tc(i915, phy) &&
1353             dig_port->tc_mode == TC_PORT_TBT_ALT)
1354                 ret |= DP_AUX_CH_CTL_TBT_IO;
1355
1356         return ret;
1357 }
1358
/*
 * Perform one raw AUX channel transfer.
 *
 * @send/@send_bytes: request bytes to transmit (header plus payload).
 * @recv/@recv_size: reply buffer; a longer reply is truncated to fit.
 * @aux_send_ctl_flags: extra bits OR'd into the AUX_CH_CTL value.
 *
 * Returns the number of bytes received, or a negative error code:
 * -EBUSY (channel never went idle, transfer never completed, or the
 * sink reported a forbidden reply size), -E2BIG (message too large),
 * -EIO (receive error), -ETIMEDOUT (sink did not respond).
 */
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	if (is_tc_port)
		intel_tc_port_lock(dig_port);

	aux_domain = intel_aux_power_domain(dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	cpu_latency_qos_update_request(&i915->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	/* try == 3 means the channel stayed busy through all attempts. */
	if (try == 3) {
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		/* Only warn once per distinct busy status value. */
		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(&i915->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Outer loop: retry with each available clock divider. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(&i915->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	/* Truncate the reply to the caller's buffer size. */
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	/* Undo everything acquired above, in reverse order. */
	cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(dig_port);

	return ret;
}
1551
1552 #define BARE_ADDRESS_SIZE       3
1553 #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
1554
1555 static void
1556 intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
1557                     const struct drm_dp_aux_msg *msg)
1558 {
1559         txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
1560         txbuf[1] = (msg->address >> 8) & 0xff;
1561         txbuf[2] = msg->address & 0xff;
1562         txbuf[3] = msg->size - 1;
1563 }
1564
1565 static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
1566 {
1567         /*
1568          * If we're trying to send the HDCP Aksv, we need to set a the Aksv
1569          * select bit to inform the hardware to send the Aksv after our header
1570          * since we can't access that data from software.
1571          */
1572         if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
1573             msg->address == DP_AUX_HDCP_AKSV)
1574                 return DP_AUX_CH_CTL_AUX_AKSV_SELECT;
1575
1576         return 0;
1577 }
1578
/*
 * drm_dp_aux .transfer() hook: frame @msg into an AUX transaction, run it
 * over the i915 AUX channel and decode the reply. Returns the payload size
 * on success or a negative errno.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	u32 flags = intel_dp_aux_xfer_flags(msg);
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* A zero-size write is sent as an address-only transaction */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (drm_WARN_ON(&i915->drm, txsize > 20))
			return -E2BIG;

		/* buffer and size must be both set or both clear */
		drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			/* Reply code lives in the high nibble of byte 0 */
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		/* A zero-size read likewise sends only the address bytes */
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (drm_WARN_ON(&i915->drm, rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1651
1652
1653 static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
1654 {
1655         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1656         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1657         enum aux_ch aux_ch = dig_port->aux_ch;
1658
1659         switch (aux_ch) {
1660         case AUX_CH_B:
1661         case AUX_CH_C:
1662         case AUX_CH_D:
1663                 return DP_AUX_CH_CTL(aux_ch);
1664         default:
1665                 MISSING_CASE(aux_ch);
1666                 return DP_AUX_CH_CTL(AUX_CH_B);
1667         }
1668 }
1669
1670 static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
1671 {
1672         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1673         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1674         enum aux_ch aux_ch = dig_port->aux_ch;
1675
1676         switch (aux_ch) {
1677         case AUX_CH_B:
1678         case AUX_CH_C:
1679         case AUX_CH_D:
1680                 return DP_AUX_CH_DATA(aux_ch, index);
1681         default:
1682                 MISSING_CASE(aux_ch);
1683                 return DP_AUX_CH_DATA(AUX_CH_B, index);
1684         }
1685 }
1686
1687 static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
1688 {
1689         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1690         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1691         enum aux_ch aux_ch = dig_port->aux_ch;
1692
1693         switch (aux_ch) {
1694         case AUX_CH_A:
1695                 return DP_AUX_CH_CTL(aux_ch);
1696         case AUX_CH_B:
1697         case AUX_CH_C:
1698         case AUX_CH_D:
1699                 return PCH_DP_AUX_CH_CTL(aux_ch);
1700         default:
1701                 MISSING_CASE(aux_ch);
1702                 return DP_AUX_CH_CTL(AUX_CH_A);
1703         }
1704 }
1705
1706 static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
1707 {
1708         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1709         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1710         enum aux_ch aux_ch = dig_port->aux_ch;
1711
1712         switch (aux_ch) {
1713         case AUX_CH_A:
1714                 return DP_AUX_CH_DATA(aux_ch, index);
1715         case AUX_CH_B:
1716         case AUX_CH_C:
1717         case AUX_CH_D:
1718                 return PCH_DP_AUX_CH_DATA(aux_ch, index);
1719         default:
1720                 MISSING_CASE(aux_ch);
1721                 return DP_AUX_CH_DATA(AUX_CH_A, index);
1722         }
1723 }
1724
1725 static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
1726 {
1727         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1728         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1729         enum aux_ch aux_ch = dig_port->aux_ch;
1730
1731         switch (aux_ch) {
1732         case AUX_CH_A:
1733         case AUX_CH_B:
1734         case AUX_CH_C:
1735         case AUX_CH_D:
1736         case AUX_CH_E:
1737         case AUX_CH_F:
1738         case AUX_CH_G:
1739                 return DP_AUX_CH_CTL(aux_ch);
1740         default:
1741                 MISSING_CASE(aux_ch);
1742                 return DP_AUX_CH_CTL(AUX_CH_A);
1743         }
1744 }
1745
1746 static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
1747 {
1748         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1749         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1750         enum aux_ch aux_ch = dig_port->aux_ch;
1751
1752         switch (aux_ch) {
1753         case AUX_CH_A:
1754         case AUX_CH_B:
1755         case AUX_CH_C:
1756         case AUX_CH_D:
1757         case AUX_CH_E:
1758         case AUX_CH_F:
1759         case AUX_CH_G:
1760                 return DP_AUX_CH_DATA(aux_ch, index);
1761         default:
1762                 MISSING_CASE(aux_ch);
1763                 return DP_AUX_CH_DATA(AUX_CH_A, index);
1764         }
1765 }
1766
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	/* Free the channel name kasprintf()'d in intel_dp_aux_init() */
	kfree(intel_dp->aux.name);
}
1772
1773 static void
1774 intel_dp_aux_init(struct intel_dp *intel_dp)
1775 {
1776         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1777         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1778         struct intel_encoder *encoder = &dig_port->base;
1779
1780         if (INTEL_GEN(dev_priv) >= 9) {
1781                 intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
1782                 intel_dp->aux_ch_data_reg = skl_aux_data_reg;
1783         } else if (HAS_PCH_SPLIT(dev_priv)) {
1784                 intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
1785                 intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
1786         } else {
1787                 intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
1788                 intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
1789         }
1790
1791         if (INTEL_GEN(dev_priv) >= 9)
1792                 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
1793         else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
1794                 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
1795         else if (HAS_PCH_SPLIT(dev_priv))
1796                 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
1797         else
1798                 intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
1799
1800         if (INTEL_GEN(dev_priv) >= 9)
1801                 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
1802         else
1803                 intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
1804
1805         drm_dp_aux_init(&intel_dp->aux);
1806
1807         /* Failure to allocate our preferred name is not critical */
1808         intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/port %c",
1809                                        aux_ch_name(dig_port->aux_ch),
1810                                        port_name(encoder->port));
1811         intel_dp->aux.transfer = intel_dp_aux_transfer;
1812 }
1813
1814 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1815 {
1816         int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1817
1818         return max_rate >= 540000;
1819 }
1820
1821 bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
1822 {
1823         int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1824
1825         return max_rate >= 810000;
1826 }
1827
1828 static void
1829 intel_dp_set_clock(struct intel_encoder *encoder,
1830                    struct intel_crtc_state *pipe_config)
1831 {
1832         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1833         const struct dp_link_dpll *divisor = NULL;
1834         int i, count = 0;
1835
1836         if (IS_G4X(dev_priv)) {
1837                 divisor = g4x_dpll;
1838                 count = ARRAY_SIZE(g4x_dpll);
1839         } else if (HAS_PCH_SPLIT(dev_priv)) {
1840                 divisor = pch_dpll;
1841                 count = ARRAY_SIZE(pch_dpll);
1842         } else if (IS_CHERRYVIEW(dev_priv)) {
1843                 divisor = chv_dpll;
1844                 count = ARRAY_SIZE(chv_dpll);
1845         } else if (IS_VALLEYVIEW(dev_priv)) {
1846                 divisor = vlv_dpll;
1847                 count = ARRAY_SIZE(vlv_dpll);
1848         }
1849
1850         if (divisor && count) {
1851                 for (i = 0; i < count; i++) {
1852                         if (pipe_config->port_clock == divisor[i].clock) {
1853                                 pipe_config->dpll = divisor[i].dpll;
1854                                 pipe_config->clock_set = true;
1855                                 break;
1856                         }
1857                 }
1858         }
1859 }
1860
/*
 * Format @nelem integers from @array into @str as a NUL-terminated,
 * comma-separated list of at most @len bytes. Stops on truncation or an
 * output error, leaving the successfully formatted prefix in place.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	if (!len)
		return;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		/*
		 * Compare explicitly: the old "r >= len" mixed int and
		 * size_t and only caught snprintf errors (r < 0) via the
		 * implicit conversion to unsigned.
		 */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1876
/* Dump the source, sink and common link-rate arrays to the KMS debug log. */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	char str[128]; /* FIXME: too big for stack? */

	/* Skip the string formatting entirely when KMS debugging is off */
	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	drm_dbg_kms(&i915->drm, "source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
}
1897
1898 int
1899 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1900 {
1901         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1902         int len;
1903
1904         len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
1905         if (drm_WARN_ON(&i915->drm, len <= 0))
1906                 return 162000;
1907
1908         return intel_dp->common_rates[len - 1];
1909 }
1910
1911 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1912 {
1913         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1914         int i = intel_dp_rate_index(intel_dp->sink_rates,
1915                                     intel_dp->num_sink_rates, rate);
1916
1917         if (drm_WARN_ON(&i915->drm, i < 0))
1918                 i = 0;
1919
1920         return i;
1921 }
1922
1923 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1924                            u8 *link_bw, u8 *rate_select)
1925 {
1926         /* eDP 1.4 rate select method. */
1927         if (intel_dp->use_rate_select) {
1928                 *link_bw = 0;
1929                 *rate_select =
1930                         intel_dp_rate_select(intel_dp, port_clock);
1931         } else {
1932                 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1933                 *rate_select = 0;
1934         }
1935 }
1936
1937 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
1938                                          const struct intel_crtc_state *pipe_config)
1939 {
1940         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1941
1942         /* On TGL, FEC is supported on all Pipes */
1943         if (INTEL_GEN(dev_priv) >= 12)
1944                 return true;
1945
1946         if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
1947                 return true;
1948
1949         return false;
1950 }
1951
1952 static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
1953                                   const struct intel_crtc_state *pipe_config)
1954 {
1955         return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
1956                 drm_dp_sink_supports_fec(intel_dp->fec_capable);
1957 }
1958
1959 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
1960                                   const struct intel_crtc_state *crtc_state)
1961 {
1962         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
1963
1964         if (!intel_dp_is_edp(intel_dp) && !crtc_state->fec_enable)
1965                 return false;
1966
1967         return intel_dsc_source_support(encoder, crtc_state) &&
1968                 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
1969 }
1970
1971 static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
1972                                 struct intel_crtc_state *pipe_config)
1973 {
1974         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1975         struct intel_connector *intel_connector = intel_dp->attached_connector;
1976         int bpp, bpc;
1977
1978         bpp = pipe_config->pipe_bpp;
1979         bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
1980
1981         if (bpc > 0)
1982                 bpp = min(bpp, 3*bpc);
1983
1984         if (intel_dp_is_edp(intel_dp)) {
1985                 /* Get bpp from vbt only for panels that dont have bpp in edid */
1986                 if (intel_connector->base.display_info.bpc == 0 &&
1987                     dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
1988                         drm_dbg_kms(&dev_priv->drm,
1989                                     "clamping bpp for eDP panel to BIOS-provided %i\n",
1990                                     dev_priv->vbt.edp.bpp);
1991                         bpp = dev_priv->vbt.edp.bpp;
1992                 }
1993         }
1994
1995         return bpp;
1996 }
1997
/* Adjust link config limits based on compliance test requests. */
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		/* Pin bpp to exactly the requested value */
		limits->min_bpp = limits->max_bpp = bpp;
		pipe_config->dither_force_disable = bpp == 6 * 3;

		drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			/* Pin the clock index and lane count to the request */
			if (index >= 0)
				limits->min_clock = limits->max_clock = index;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}
2035
2036 static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp)
2037 {
2038         /*
2039          * bpp value was assumed to RGB format. And YCbCr 4:2:0 output
2040          * format of the number of bytes per pixel will be half the number
2041          * of bytes of RGB pixel.
2042          */
2043         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
2044                 bpp /= 2;
2045
2046         return bpp;
2047 }
2048
2049 /* Optimize link config in order: max bpp, min clock, min lanes */
2050 static int
2051 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
2052                                   struct intel_crtc_state *pipe_config,
2053                                   const struct link_config_limits *limits)
2054 {
2055         struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
2056         int bpp, clock, lane_count;
2057         int mode_rate, link_clock, link_avail;
2058
2059         for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
2060                 int output_bpp = intel_dp_output_bpp(pipe_config, bpp);
2061
2062                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
2063                                                    output_bpp);
2064
2065                 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
2066                         for (lane_count = limits->min_lane_count;
2067                              lane_count <= limits->max_lane_count;
2068                              lane_count <<= 1) {
2069                                 link_clock = intel_dp->common_rates[clock];
2070                                 link_avail = intel_dp_max_data_rate(link_clock,
2071                                                                     lane_count);
2072
2073                                 if (mode_rate <= link_avail) {
2074                                         pipe_config->lane_count = lane_count;
2075                                         pipe_config->pipe_bpp = bpp;
2076                                         pipe_config->port_clock = link_clock;
2077
2078                                         return 0;
2079                                 }
2080                         }
2081                 }
2082         }
2083
2084         return -EINVAL;
2085 }
2086
2087 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
2088 {
2089         int i, num_bpc;
2090         u8 dsc_bpc[3] = {0};
2091
2092         num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
2093                                                        dsc_bpc);
2094         for (i = 0; i < num_bpc; i++) {
2095                 if (dsc_max_bpc >= dsc_bpc[i])
2096                         return dsc_bpc[i] * 3;
2097         }
2098
2099         return 0;
2100 }
2101
/* Cap applied to the sink's reported DSC minor version (see min() below) */
#define DSC_SUPPORTED_VERSION_MIN               1

/*
 * Fill crtc_state->dsc.config from the sink's DSC DPCD capabilities and
 * compute the rate-control parameters. Returns 0 or a negative errno.
 */
static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	u8 line_buf_depth;
	int ret;

	ret = intel_dsc_compute_params(encoder, crtc_state);
	if (ret)
		return ret;

	/*
	 * Slice Height of 8 works for all currently available panels. So start
	 * with that if pic_height is an integral multiple of 8. Eventually add
	 * logic to try multiple slice heights.
	 */
	if (vdsc_cfg->pic_height % 8 == 0)
		vdsc_cfg->slice_height = 8;
	else if (vdsc_cfg->pic_height % 4 == 0)
		vdsc_cfg->slice_height = 4;
	else
		vdsc_cfg->slice_height = 2;

	/* DSC version: major from the sink, minor capped to what we support */
	vdsc_cfg->dsc_version_major =
		(intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
	vdsc_cfg->dsc_version_minor =
		min(DSC_SUPPORTED_VERSION_MIN,
		    (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		     DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);

	vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
		DP_DSC_RGB;

	line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
	if (!line_buf_depth) {
		drm_dbg_kms(&i915->drm,
			    "DSC Sink Line Buffer Depth invalid\n");
		return -EINVAL;
	}

	/* Clamp the sink's line buffer depth per the DSC minor version */
	if (vdsc_cfg->dsc_version_minor == 2)
		vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
	else
		vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;

	vdsc_cfg->block_pred_enable =
		intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
		DP_DSC_BLK_PREDICTION_IS_SUPPORTED;

	return drm_dsc_compute_rc_parameters(vdsc_cfg);
}
2160
/*
 * Compute a DSC-compressed link configuration (bpp, clock, lane count,
 * compressed bpp, slice count) for @pipe_config. Returns 0 on success or
 * a negative errno when DSC cannot be used for this mode.
 */
static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       struct link_config_limits *limits)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	u8 dsc_max_bpc;
	int pipe_bpp;
	int ret;

	/* FEC is required for DSC on DP (but not eDP) links */
	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
		intel_dp_supports_fec(intel_dp, pipe_config);

	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
		return -EINVAL;

	/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
	if (INTEL_GEN(dev_priv) >= 12)
		dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
	else
		dsc_max_bpc = min_t(u8, 10,
				    conn_state->max_requested_bpc);

	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);

	/* Min Input BPC for ICL+ is 8 */
	if (pipe_bpp < 8 * 3) {
		drm_dbg_kms(&dev_priv->drm,
			    "No DSC support for less than 8bpc\n");
		return -EINVAL;
	}

	/*
	 * For now enable DSC for max bpp, max link rate, max lane count.
	 * Optimize this later for the minimum possible link rate/lane count
	 * with DSC enabled for the requested mode.
	 */
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		/* eDP: the sink's DPCD directly reports the output bpp */
		pipe_config->dsc.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
	} else {
		u16 dsc_max_output_bpp;
		u8 dsc_dp_slice_count;

		/* DP: derive output bpp and slice count from link and mode */
		dsc_max_output_bpp =
			intel_dp_dsc_get_output_bpp(dev_priv,
						    pipe_config->port_clock,
						    pipe_config->lane_count,
						    adjusted_mode->crtc_clock,
						    adjusted_mode->crtc_hdisplay);
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay);
		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
			drm_dbg_kms(&dev_priv->drm,
				    "Compressed BPP/Slice Count not supported\n");
			return -EINVAL;
		}
		pipe_config->dsc.compressed_bpp = min_t(u16,
							       dsc_max_output_bpp >> 4,
							       pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count = dsc_dp_slice_count;
	}
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
		if (pipe_config->dsc.slice_count > 1) {
			pipe_config->dsc.dsc_split = true;
		} else {
			drm_dbg_kms(&dev_priv->drm,
				    "Cannot split stream to use 2 VDSC instances\n");
			return -EINVAL;
		}
	}

	ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
	if (ret < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Cannot compute valid DSC parameters for Input Bpp = %d "
			    "Compressed BPP = %d\n",
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);
		return ret;
	}

	pipe_config->dsc.compression_enable = true;
	drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
		    "Compressed Bpp = %d Slice Count = %d\n",
		    pipe_config->pipe_bpp,
		    pipe_config->dsc.compressed_bpp,
		    pipe_config->dsc.slice_count);

	return 0;
}
2270
2271 int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state)
2272 {
2273         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
2274                 return 6 * 3;
2275         else
2276                 return 8 * 3;
2277 }
2278
2279 static int
2280 intel_dp_compute_link_config(struct intel_encoder *encoder,
2281                              struct intel_crtc_state *pipe_config,
2282                              struct drm_connector_state *conn_state)
2283 {
2284         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2285         const struct drm_display_mode *adjusted_mode =
2286                 &pipe_config->hw.adjusted_mode;
2287         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2288         struct link_config_limits limits;
2289         int common_len;
2290         int ret;
2291
2292         common_len = intel_dp_common_len_rate_limit(intel_dp,
2293                                                     intel_dp->max_link_rate);
2294
2295         /* No common link rates between source and sink */
2296         drm_WARN_ON(encoder->base.dev, common_len <= 0);
2297
2298         limits.min_clock = 0;
2299         limits.max_clock = common_len - 1;
2300
2301         limits.min_lane_count = 1;
2302         limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
2303
2304         limits.min_bpp = intel_dp_min_bpp(pipe_config);
2305         limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
2306
2307         if (intel_dp_is_edp(intel_dp)) {
2308                 /*
2309                  * Use the maximum clock and number of lanes the eDP panel
2310                  * advertizes being capable of. The panels are generally
2311                  * designed to support only a single clock and lane
2312                  * configuration, and typically these values correspond to the
2313                  * native resolution of the panel.
2314                  */
2315                 limits.min_lane_count = limits.max_lane_count;
2316                 limits.min_clock = limits.max_clock;
2317         }
2318
2319         intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
2320
2321         drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
2322                     "max rate %d max bpp %d pixel clock %iKHz\n",
2323                     limits.max_lane_count,
2324                     intel_dp->common_rates[limits.max_clock],
2325                     limits.max_bpp, adjusted_mode->crtc_clock);
2326
2327         /*
2328          * Optimize for slow and wide. This is the place to add alternative
2329          * optimization policy.
2330          */
2331         ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
2332
2333         /* enable compression if the mode doesn't fit available BW */
2334         drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
2335         if (ret || intel_dp->force_dsc_en) {
2336                 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
2337                                                   conn_state, &limits);
2338                 if (ret < 0)
2339                         return ret;
2340         }
2341
2342         if (pipe_config->dsc.compression_enable) {
2343                 drm_dbg_kms(&i915->drm,
2344                             "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
2345                             pipe_config->lane_count, pipe_config->port_clock,
2346                             pipe_config->pipe_bpp,
2347                             pipe_config->dsc.compressed_bpp);
2348
2349                 drm_dbg_kms(&i915->drm,
2350                             "DP link rate required %i available %i\n",
2351                             intel_dp_link_required(adjusted_mode->crtc_clock,
2352                                                    pipe_config->dsc.compressed_bpp),
2353                             intel_dp_max_data_rate(pipe_config->port_clock,
2354                                                    pipe_config->lane_count));
2355         } else {
2356                 drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
2357                             pipe_config->lane_count, pipe_config->port_clock,
2358                             pipe_config->pipe_bpp);
2359
2360                 drm_dbg_kms(&i915->drm,
2361                             "DP link rate required %i available %i\n",
2362                             intel_dp_link_required(adjusted_mode->crtc_clock,
2363                                                    pipe_config->pipe_bpp),
2364                             intel_dp_max_data_rate(pipe_config->port_clock,
2365                                                    pipe_config->lane_count));
2366         }
2367         return 0;
2368 }
2369
2370 static int
2371 intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
2372                          struct intel_crtc_state *crtc_state,
2373                          const struct drm_connector_state *conn_state)
2374 {
2375         struct drm_connector *connector = conn_state->connector;
2376         const struct drm_display_info *info = &connector->display_info;
2377         const struct drm_display_mode *adjusted_mode =
2378                 &crtc_state->hw.adjusted_mode;
2379
2380         if (!drm_mode_is_420_only(info, adjusted_mode) ||
2381             !intel_dp_get_colorimetry_status(intel_dp) ||
2382             !connector->ycbcr_420_allowed)
2383                 return 0;
2384
2385         crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
2386
2387         return intel_pch_panel_fitting(crtc_state, conn_state);
2388 }
2389
2390 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
2391                                   const struct drm_connector_state *conn_state)
2392 {
2393         const struct intel_digital_connector_state *intel_conn_state =
2394                 to_intel_digital_connector_state(conn_state);
2395         const struct drm_display_mode *adjusted_mode =
2396                 &crtc_state->hw.adjusted_mode;
2397
2398         /*
2399          * Our YCbCr output is always limited range.
2400          * crtc_state->limited_color_range only applies to RGB,
2401          * and it must never be set for YCbCr or we risk setting
2402          * some conflicting bits in PIPECONF which will mess up
2403          * the colors on the monitor.
2404          */
2405         if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
2406                 return false;
2407
2408         if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
2409                 /*
2410                  * See:
2411                  * CEA-861-E - 5.1 Default Encoding Parameters
2412                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
2413                  */
2414                 return crtc_state->pipe_bpp != 18 &&
2415                         drm_default_rgb_quant_range(adjusted_mode) ==
2416                         HDMI_QUANTIZATION_RANGE_LIMITED;
2417         } else {
2418                 return intel_conn_state->broadcast_rgb ==
2419                         INTEL_BROADCAST_RGB_LIMITED;
2420         }
2421 }
2422
2423 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
2424                                     enum port port)
2425 {
2426         if (IS_G4X(dev_priv))
2427                 return false;
2428         if (INTEL_GEN(dev_priv) < 12 && port == PORT_A)
2429                 return false;
2430
2431         return true;
2432 }
2433
/*
 * Fill @vsc with the pixel encoding and colorimetry format derived from
 * @crtc_state / @conn_state, as a revision 0x5, length 0x13 VSC SDP
 * (DP 1.4 spec, Table 2-118): 3D stereo + PSR2 + Pixel Encoding/
 * Colorimetry Format indication.
 */
static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
                                             const struct drm_connector_state *conn_state,
                                             struct drm_dp_vsc_sdp *vsc)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        /*
         * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
         * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
         * Colorimetry Format indication.
         */
        vsc->revision = 0x5;
        vsc->length = 0x13;

        /* DP 1.4a spec, Table 2-120 */
        switch (crtc_state->output_format) {
        case INTEL_OUTPUT_FORMAT_YCBCR444:
                vsc->pixelformat = DP_PIXELFORMAT_YUV444;
                break;
        case INTEL_OUTPUT_FORMAT_YCBCR420:
                vsc->pixelformat = DP_PIXELFORMAT_YUV420;
                break;
        case INTEL_OUTPUT_FORMAT_RGB:
        default:
                vsc->pixelformat = DP_PIXELFORMAT_RGB;
        }

        /* Map the connector's colorspace property onto DP colorimetry codes. */
        switch (conn_state->colorspace) {
        case DRM_MODE_COLORIMETRY_BT709_YCC:
                vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
                break;
        case DRM_MODE_COLORIMETRY_XVYCC_601:
                vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
                break;
        case DRM_MODE_COLORIMETRY_XVYCC_709:
                vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
                break;
        case DRM_MODE_COLORIMETRY_SYCC_601:
                vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
                break;
        case DRM_MODE_COLORIMETRY_OPYCC_601:
                vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
                break;
        case DRM_MODE_COLORIMETRY_BT2020_CYCC:
                vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
                break;
        case DRM_MODE_COLORIMETRY_BT2020_RGB:
                vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
                break;
        case DRM_MODE_COLORIMETRY_BT2020_YCC:
                vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
                break;
        case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
        case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
                vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
                break;
        default:
                /*
                 * RGB->YCBCR color conversion uses the BT.709
                 * color space.
                 */
                if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
                        vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
                else
                        vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
                break;
        }

        /* pipe_bpp is the total over all three channels */
        vsc->bpc = crtc_state->pipe_bpp / 3;

        /* only RGB pixelformat supports 6 bpc */
        drm_WARN_ON(&dev_priv->drm,
                    vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);

        /* all YCbCr are always limited range */
        vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
        vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
}
2513
2514 static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
2515                                      struct intel_crtc_state *crtc_state,
2516                                      const struct drm_connector_state *conn_state)
2517 {
2518         struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;
2519
2520         /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
2521         if (crtc_state->has_psr)
2522                 return;
2523
2524         if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
2525                 return;
2526
2527         crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
2528         vsc->sdp_type = DP_SDP_VSC;
2529         intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
2530                                          &crtc_state->infoframes.vsc);
2531 }
2532
/*
 * Build the VSC SDP sent while PSR is active. Depending on PSR2 and
 * colorimetry support this is either a full colorimetry SDP (revision
 * 0x5), a PSR2 + Y-coordinate SDP (revision 0x4) or a plain PSR SDP
 * (revision 0x2). Result is written to @vsc.
 */
void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
                                  const struct intel_crtc_state *crtc_state,
                                  const struct drm_connector_state *conn_state,
                                  struct drm_dp_vsc_sdp *vsc)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        vsc->sdp_type = DP_SDP_VSC;

        if (dev_priv->psr.psr2_enabled) {
                if (dev_priv->psr.colorimetry_support &&
                    intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
                        /* [PSR2, +Colorimetry] */
                        intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
                                                         vsc);
                } else {
                        /*
                         * [PSR2, -Colorimetry]
                         * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
                         * 3D stereo + PSR/PSR2 + Y-coordinate.
                         */
                        vsc->revision = 0x4;
                        vsc->length = 0xe;
                }
        } else {
                /*
                 * [PSR1]
                 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
                 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
                 * higher).
                 */
                vsc->revision = 0x2;
                vsc->length = 0x8;
        }
}
2568
2569 static void
2570 intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
2571                                             struct intel_crtc_state *crtc_state,
2572                                             const struct drm_connector_state *conn_state)
2573 {
2574         int ret;
2575         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2576         struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;
2577
2578         if (!conn_state->hdr_output_metadata)
2579                 return;
2580
2581         ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);
2582
2583         if (ret) {
2584                 drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
2585                 return;
2586         }
2587
2588         crtc_state->infoframes.enable |=
2589                 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
2590 }
2591
2592 static void
2593 intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
2594                              struct intel_crtc_state *pipe_config,
2595                              int output_bpp, bool constant_n)
2596 {
2597         struct intel_connector *intel_connector = intel_dp->attached_connector;
2598         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2599
2600         /*
2601          * DRRS and PSR can't be enable together, so giving preference to PSR
2602          * as it allows more power-savings by complete shutting down display,
2603          * so to guarantee this, intel_dp_drrs_compute_config() must be called
2604          * after intel_psr_compute_config().
2605          */
2606         if (pipe_config->has_psr)
2607                 return;
2608
2609         if (!intel_connector->panel.downclock_mode ||
2610             dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
2611                 return;
2612
2613         pipe_config->has_drrs = true;
2614         intel_link_compute_m_n(output_bpp, pipe_config->lane_count,
2615                                intel_connector->panel.downclock_mode->clock,
2616                                pipe_config->port_clock, &pipe_config->dp_m2_n2,
2617                                constant_n, pipe_config->fec_enable);
2618 }
2619
/*
 * Compute the full DP crtc state for a modeset: output format, audio,
 * eDP fixed mode + panel fitting, link parameters (possibly DSC
 * compressed), color range, M/N values, PSR/DRRS state and the VSC/HDR
 * infoframe SDPs. Returns 0 on success or a negative errno if the mode
 * can't be supported.
 */
int
intel_dp_compute_config(struct intel_encoder *encoder,
                        struct intel_crtc_state *pipe_config,
                        struct drm_connector_state *conn_state)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
        enum port port = encoder->port;
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        struct intel_digital_connector_state *intel_conn_state =
                to_intel_digital_connector_state(conn_state);
        bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
                                           DP_DPCD_QUIRK_CONSTANT_N);
        int ret = 0, output_bpp;

        if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
                pipe_config->has_pch_encoder = true;

        /* Default to RGB; may be overridden to YCbCr 4:2:0 below. */
        pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;

        if (lspcon->active)
                lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
        else
                ret = intel_dp_ycbcr420_config(intel_dp, pipe_config,
                                               conn_state);
        if (ret)
                return ret;

        if (!intel_dp_port_has_audio(dev_priv, port))
                pipe_config->has_audio = false;
        else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
                pipe_config->has_audio = intel_dp->has_audio;
        else
                pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;

        /* eDP always drives the panel's native (fixed) mode, scaled if needed. */
        if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
                intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
                                       adjusted_mode);

                if (HAS_GMCH(dev_priv))
                        ret = intel_gmch_panel_fitting(pipe_config, conn_state);
                else
                        ret = intel_pch_panel_fitting(pipe_config, conn_state);
                if (ret)
                        return ret;
        }

        /* Reject mode flags DP can't handle on this hardware. */
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
                return -EINVAL;

        if (HAS_GMCH(dev_priv) &&
            adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
                return -EINVAL;

        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
                return -EINVAL;

        if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
                return -EINVAL;

        /* Pick lane count, link rate, bpp and possibly DSC. */
        ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
        if (ret < 0)
                return ret;

        pipe_config->limited_color_range =
                intel_dp_limited_color_range(pipe_config, conn_state);

        if (pipe_config->dsc.compression_enable)
                output_bpp = pipe_config->dsc.compressed_bpp;
        else
                output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp);

        intel_link_compute_m_n(output_bpp,
                               pipe_config->lane_count,
                               adjusted_mode->crtc_clock,
                               pipe_config->port_clock,
                               &pipe_config->dp_m_n,
                               constant_n, pipe_config->fec_enable);

        if (!HAS_DDI(dev_priv))
                intel_dp_set_clock(encoder, pipe_config);

        /* PSR must be computed before DRRS, which yields to PSR. */
        intel_psr_compute_config(intel_dp, pipe_config);
        intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
                                     constant_n);
        intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
        intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);

        return 0;
}
2712
2713 void intel_dp_set_link_params(struct intel_dp *intel_dp,
2714                               int link_rate, u8 lane_count,
2715                               bool link_mst)
2716 {
2717         intel_dp->link_trained = false;
2718         intel_dp->link_rate = link_rate;
2719         intel_dp->lane_count = lane_count;
2720         intel_dp->link_mst = link_mst;
2721 }
2722
/*
 * Precompute the DP port register value (intel_dp->DP) for the upcoming
 * port enable, handling the IBX/CPU vs IVB CPU vs CPT PCH register
 * layout differences. For CPT the sync polarity / enhanced framing bits
 * live in TRANS_DP_CTL instead, which is updated here directly.
 */
static void intel_dp_prepare(struct intel_encoder *encoder,
                             const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        enum port port = encoder->port;
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
        const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;

        intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
                                 pipe_config->lane_count,
                                 intel_crtc_has_type(pipe_config,
                                                     INTEL_OUTPUT_DP_MST));

        /*
         * There are four kinds of DP registers:
         *
         *      IBX PCH
         *      SNB CPU
         *      IVB CPU
         *      CPT PCH
         *
         * IBX PCH and CPU are the same for almost everything,
         * except that the CPU DP PLL is configured in this
         * register
         *
         * CPT PCH is quite different, having many bits moved
         * to the TRANS_DP_CTL register instead. That
         * configuration happens (oddly) in ilk_pch_enable
         */

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;

        /* Handle DP bits in common between all three register formats */
        intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);

        /* Split out the IBX/CPU vs CPT settings */

        if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
                /* IVB CPU port A: sync polarity and pipe select in DP reg */
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
        } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
                /* CPT PCH: enhanced framing is in TRANS_DP_CTL */
                u32 trans_dp;

                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        trans_dp |= TRANS_DP_ENH_FRAMING;
                else
                        trans_dp &= ~TRANS_DP_ENH_FRAMING;
                intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
        } else {
                /* IBX PCH / CPU: everything in the DP register itself */
                if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
                        intel_dp->DP |= DP_COLOR_RANGE_16_235;

                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                if (IS_CHERRYVIEW(dev_priv))
                        intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
                else
                        intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
        }
}
2806
/*
 * Panel power sequencer status mask/value pairs for wait_panel_status():
 * "on" = panel powered up and sequencer idle, "off" = powered down and
 * idle, "cycle" = power cycle (including the cycle delay) complete.
 */
#define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

static void intel_pps_verify_state(struct intel_dp *intel_dp);
2817
/*
 * Poll the panel power status register until (status & mask) == value,
 * logging an error on timeout (5 seconds). Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
                                       u32 mask,
                                       u32 value)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        i915_reg_t pp_stat_reg, pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        intel_pps_verify_state(intel_dp);

        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        drm_dbg_kms(&dev_priv->drm,
                    "mask %08x value %08x status %08x control %08x\n",
                    mask, value,
                    intel_de_read(dev_priv, pp_stat_reg),
                    intel_de_read(dev_priv, pp_ctrl_reg));

        if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
                                       mask, value, 5000))
                drm_err(&dev_priv->drm,
                        "Panel status timeout: status %08x control %08x\n",
                        intel_de_read(dev_priv, pp_stat_reg),
                        intel_de_read(dev_priv, pp_ctrl_reg));

        drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
}
2847
2848 static void wait_panel_on(struct intel_dp *intel_dp)
2849 {
2850         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2851
2852         drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
2853         wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
2854 }
2855
2856 static void wait_panel_off(struct intel_dp *intel_dp)
2857 {
2858         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2859
2860         drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
2861         wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
2862 }
2863
/*
 * Wait until a full panel power cycle is allowed: enforce the remaining
 * portion of the power cycle delay (t11_t12), then wait for the PPS to
 * report the cycle complete.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        ktime_t panel_power_on_time;
        s64 panel_power_off_duration;

        drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");

        /*
         * Take the difference of current time and panel power off time
         * and then make panel wait for t11_t12 if needed.
         */
        panel_power_on_time = ktime_get_boottime();
        panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

        /*
         * When we disable the VDD override bit last we have to do the
         * manual wait.
         */
        if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
                wait_remaining_ms_from_jiffies(jiffies,
                                       intel_dp->panel_power_cycle_delay - panel_power_off_duration);

        wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
2885
/*
 * Honor the configured panel-power-on to backlight-on delay, measured
 * from the last recorded power on.
 */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
                                       intel_dp->backlight_on_delay);
}
2891
/*
 * Honor the configured backlight-off delay, measured from the last
 * recorded backlight off.
 */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
                                       intel_dp->backlight_off_delay);
}
2897
2898 /* Read the current pp_control value, unlocking the register if it
2899  * is locked
2900  */
2901
2902 static  u32 ilk_get_pp_control(struct intel_dp *intel_dp)
2903 {
2904         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2905         u32 control;
2906
2907         lockdep_assert_held(&dev_priv->pps_mutex);
2908
2909         control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
2910         if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
2911                         (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
2912                 control &= ~PANEL_UNLOCK_MASK;
2913                 control |= PANEL_UNLOCK_REGS;
2914         }
2915         return control;
2916 }
2917
2918 /*
2919  * Must be paired with edp_panel_vdd_off().
2920  * Must hold pps_mutex around the whole on/off sequence.
2921  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2922  */
2923 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
2924 {
2925         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2926         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2927         u32 pp;
2928         i915_reg_t pp_stat_reg, pp_ctrl_reg;
2929         bool need_to_disable = !intel_dp->want_panel_vdd;
2930
2931         lockdep_assert_held(&dev_priv->pps_mutex);
2932
2933         if (!intel_dp_is_edp(intel_dp))
2934                 return false;
2935
2936         cancel_delayed_work(&intel_dp->panel_vdd_work);
2937         intel_dp->want_panel_vdd = true;
2938
2939         if (edp_have_panel_vdd(intel_dp))
2940                 return need_to_disable;
2941
2942         intel_display_power_get(dev_priv,
2943                                 intel_aux_power_domain(dig_port));
2944
2945         drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
2946                     dig_port->base.base.base.id,
2947                     dig_port->base.base.name);
2948
2949         if (!edp_have_panel_power(intel_dp))
2950                 wait_panel_power_cycle(intel_dp);
2951
2952         pp = ilk_get_pp_control(intel_dp);
2953         pp |= EDP_FORCE_VDD;
2954
2955         pp_stat_reg = _pp_stat_reg(intel_dp);
2956         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2957
2958         intel_de_write(dev_priv, pp_ctrl_reg, pp);
2959         intel_de_posting_read(dev_priv, pp_ctrl_reg);
2960         drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2961                     intel_de_read(dev_priv, pp_stat_reg),
2962                     intel_de_read(dev_priv, pp_ctrl_reg));
2963         /*
2964          * If the panel wasn't on, delay before accessing aux channel
2965          */
2966         if (!edp_have_panel_power(intel_dp)) {
2967                 drm_dbg_kms(&dev_priv->drm,
2968                             "[ENCODER:%d:%s] panel power wasn't enabled\n",
2969                             dig_port->base.base.base.id,
2970                             dig_port->base.base.name);
2971                 msleep(intel_dp->panel_power_up_delay);
2972         }
2973
2974         return need_to_disable;
2975 }
2976
2977 /*
2978  * Must be paired with intel_edp_panel_vdd_off() or
2979  * intel_edp_panel_off().
2980  * Nested calls to these functions are not allowed since
2981  * we drop the lock. Caller must use some higher level
2982  * locking to prevent nested calls from other threads.
2983  */
2984 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2985 {
2986         intel_wakeref_t wakeref;
2987         bool vdd;
2988
2989         if (!intel_dp_is_edp(intel_dp))
2990                 return;
2991
2992         vdd = false;
2993         with_pps_lock(intel_dp, wakeref)
2994                 vdd = edp_panel_vdd_on(intel_dp);
2995         I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
2996                         dp_to_dig_port(intel_dp)->base.base.base.id,
2997                         dp_to_dig_port(intel_dp)->base.base.name);
2998 }
2999
/*
 * Immediately turn eDP VDD off (clear EDP_FORCE_VDD) and drop the AUX
 * power reference taken in edp_panel_vdd_on(). Caller must hold
 * pps_mutex and want_panel_vdd must already be false.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *dig_port =
                dp_to_dig_port(intel_dp);
        u32 pp;
        i915_reg_t pp_stat_reg, pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd);

        if (!edp_have_panel_vdd(intel_dp))
                return;

        drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
                    dig_port->base.base.base.id,
                    dig_port->base.base.name);

        pp = ilk_get_pp_control(intel_dp);
        pp &= ~EDP_FORCE_VDD;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp_stat_reg = _pp_stat_reg(intel_dp);

        intel_de_write(dev_priv, pp_ctrl_reg, pp);
        intel_de_posting_read(dev_priv, pp_ctrl_reg);

        /* Make sure sequencer is idle before allowing subsequent activity */
        drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
                    intel_de_read(dev_priv, pp_stat_reg),
                    intel_de_read(dev_priv, pp_ctrl_reg));

        /* If the panel is fully off now, start the power-cycle clock. */
        if ((pp & PANEL_POWER_ON) == 0)
                intel_dp->panel_power_off_time = ktime_get_boottime();

        /* Balances the intel_display_power_get() in edp_panel_vdd_on(). */
        intel_display_power_put_unchecked(dev_priv,
                                          intel_aux_power_domain(dig_port));
}
3039
3040 static void edp_panel_vdd_work(struct work_struct *__work)
3041 {
3042         struct intel_dp *intel_dp =
3043                 container_of(to_delayed_work(__work),
3044                              struct intel_dp, panel_vdd_work);
3045         intel_wakeref_t wakeref;
3046
3047         with_pps_lock(intel_dp, wakeref) {
3048                 if (!intel_dp->want_panel_vdd)
3049                         edp_panel_vdd_off_sync(intel_dp);
3050         }
3051 }
3052
3053 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
3054 {
3055         unsigned long delay;
3056
3057         /*
3058          * Queue the timer to fire a long time from now (relative to the power
3059          * down delay) to keep the panel power up across a sequence of
3060          * operations.
3061          */
3062         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
3063         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
3064 }
3065
3066 /*
3067  * Must be paired with edp_panel_vdd_on().
3068  * Must hold pps_mutex around the whole on/off sequence.
3069  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
3070  */
3071 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
3072 {
3073         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3074
3075         lockdep_assert_held(&dev_priv->pps_mutex);
3076
3077         if (!intel_dp_is_edp(intel_dp))
3078                 return;
3079
3080         I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
3081                         dp_to_dig_port(intel_dp)->base.base.base.id,
3082                         dp_to_dig_port(intel_dp)->base.base.name);
3083
3084         intel_dp->want_panel_vdd = false;
3085
3086         if (sync)
3087                 edp_panel_vdd_off_sync(intel_dp);
3088         else
3089                 edp_panel_vdd_schedule_off(intel_dp);
3090 }
3091
/*
 * Turn on eDP panel power via the panel power sequencer: wait out any
 * pending power cycle, set PANEL_POWER_ON, then wait until the sequencer
 * reports the panel as powered. Caller must hold pps_mutex.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
		    dp_to_dig_port(intel_dp)->base.base.base.id,
		    dp_to_dig_port(intel_dp)->base.base.name);

	if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
		     "[ENCODER:%d:%s] panel power already on\n",
		     dp_to_dig_port(intel_dp)->base.base.base.id,
		     dp_to_dig_port(intel_dp)->base.base.name))
		return;

	/* Honor the panel's minimum power-off -> power-on delay */
	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ilk_get_pp_control(intel_dp);
	if (IS_GEN(dev_priv, 5)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}

	pp |= PANEL_POWER_ON;
	if (!IS_GEN(dev_priv, 5))
		pp |= PANEL_POWER_RESET;

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	/* Block until the sequencer actually reports panel power on */
	wait_panel_on(intel_dp);
	intel_dp->last_power_on = jiffies;

	if (IS_GEN(dev_priv, 5)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}
}
3140
3141 void intel_edp_panel_on(struct intel_dp *intel_dp)
3142 {
3143         intel_wakeref_t wakeref;
3144
3145         if (!intel_dp_is_edp(intel_dp))
3146                 return;
3147
3148         with_pps_lock(intel_dp, wakeref)
3149                 edp_panel_on(intel_dp);
3150 }
3151
3152
/*
 * Turn off eDP panel power via the panel power sequencer. Also drops
 * VDD force-on and the AUX power domain reference that came with it.
 * Caller must hold pps_mutex and hold a VDD reference.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
		    dig_port->base.base.base.id, dig_port->base.base.name);

	drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd,
		 "Need [ENCODER:%d:%s] VDD to turn off panel\n",
		 dig_port->base.base.base.id, dig_port->base.base.name);

	pp = ilk_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	/* Block until the sequencer reports the panel as off */
	wait_panel_off(intel_dp);
	intel_dp->panel_power_off_time = ktime_get_boottime();

	/* We got a reference when we enabled the VDD. */
	intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
}
3191
3192 void intel_edp_panel_off(struct intel_dp *intel_dp)
3193 {
3194         intel_wakeref_t wakeref;
3195
3196         if (!intel_dp_is_edp(intel_dp))
3197                 return;
3198
3199         with_pps_lock(intel_dp, wakeref)
3200                 edp_panel_off(intel_dp);
3201 }
3202
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	with_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		/* Set only the backlight-enable bit in PP_CONTROL */
		pp = ilk_get_pp_control(intel_dp);
		pp |= EDP_BLC_ENABLE;

		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}
}
3228
3229 /* Enable backlight PWM and backlight PP control. */
3230 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
3231                             const struct drm_connector_state *conn_state)
3232 {
3233         struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
3234         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3235
3236         if (!intel_dp_is_edp(intel_dp))
3237                 return;
3238
3239         drm_dbg_kms(&i915->drm, "\n");
3240
3241         intel_panel_enable_backlight(crtc_state, conn_state);
3242         _intel_edp_backlight_on(intel_dp);
3243 }
3244
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		/* Clear only the backlight-enable bit in PP_CONTROL */
		pp = ilk_get_pp_control(intel_dp);
		pp &= ~EDP_BLC_ENABLE;

		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}

	/* Record the time and honor the backlight-off delay before returning */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
3268
3269 /* Disable backlight PP control and backlight PWM. */
3270 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
3271 {
3272         struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
3273         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3274
3275         if (!intel_dp_is_edp(intel_dp))
3276                 return;
3277
3278         drm_dbg_kms(&i915->drm, "\n");
3279
3280         _intel_edp_backlight_off(intel_dp);
3281         intel_panel_disable_backlight(old_conn_state);
3282 }
3283
3284 /*
3285  * Hook for controlling the panel power control backlight through the bl_power
3286  * sysfs attribute. Take care to handle multiple calls.
3287  */
3288 static void intel_edp_backlight_power(struct intel_connector *connector,
3289                                       bool enable)
3290 {
3291         struct drm_i915_private *i915 = to_i915(connector->base.dev);
3292         struct intel_dp *intel_dp = intel_attached_dp(connector);
3293         intel_wakeref_t wakeref;
3294         bool is_enabled;
3295
3296         is_enabled = false;
3297         with_pps_lock(intel_dp, wakeref)
3298                 is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
3299         if (is_enabled == enable)
3300                 return;
3301
3302         drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
3303                     enable ? "enable" : "disable");
3304
3305         if (enable)
3306                 _intel_edp_backlight_on(intel_dp);
3307         else
3308                 _intel_edp_backlight_off(intel_dp);
3309 }
3310
3311 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
3312 {
3313         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3314         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
3315         bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;
3316
3317         I915_STATE_WARN(cur_state != state,
3318                         "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
3319                         dig_port->base.base.base.id, dig_port->base.base.name,
3320                         onoff(state), onoff(cur_state));
3321 }
3322 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
3323
3324 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
3325 {
3326         bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;
3327
3328         I915_STATE_WARN(cur_state != state,
3329                         "eDP PLL state assertion failure (expected %s, current %s)\n",
3330                         onoff(state), onoff(cur_state));
3331 }
3332 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
3333 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
3334
/*
 * Enable the eDP PLL on port A (ILK-IVB): program the PLL frequency
 * select first, let it settle, then set the enable bit. Pipe and port
 * must be disabled, and the PLL must currently be off.
 */
static void ilk_edp_pll_on(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n",
		    pipe_config->port_clock);

	/* Select the PLL frequency matching the link rate */
	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (pipe_config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	udelay(500);

	/*
	 * [DevILK] Work around required when enabling DP PLL
	 * while a pipe is enabled going to FDI:
	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
	 * 2. Program DP PLL enable
	 */
	if (IS_GEN(dev_priv, 5))
		intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);

	intel_dp->DP |= DP_PLL_ENABLE;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	udelay(200);
}
3374
/*
 * Disable the eDP PLL on port A. Pipe and port must already be
 * disabled, and the PLL must currently be on.
 */
static void ilk_edp_pll_off(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	/* allow the PLL to settle after disable */
	udelay(200);
}
3393
3394 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
3395 {
3396         /*
3397          * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
3398          * be capable of signalling downstream hpd with a long pulse.
3399          * Whether or not that means D3 is safe to use is not clear,
3400          * but let's assume so until proven otherwise.
3401          *
3402          * FIXME should really check all downstream ports...
3403          */
3404         return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
3405                 drm_dp_is_branch(intel_dp->dpcd) &&
3406                 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
3407 }
3408
3409 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
3410                                            const struct intel_crtc_state *crtc_state,
3411                                            bool enable)
3412 {
3413         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3414         int ret;
3415
3416         if (!crtc_state->dsc.compression_enable)
3417                 return;
3418
3419         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
3420                                  enable ? DP_DECOMPRESSION_EN : 0);
3421         if (ret < 0)
3422                 drm_dbg_kms(&i915->drm,
3423                             "Failed to %s sink decompression state\n",
3424                             enable ? "enable" : "disable");
3425 }
3426
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		/* Keep the sink in D0 if downstream HPD depends on it */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
					 DP_SET_POWER_D3);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		/*
		 * When turning on, retry the write a few times (sleeping
		 * 1 ms between attempts) to give the sink time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
						 DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}

		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	/* drm_dp_dpcd_writeb() returns the number of bytes written (1) on success */
	if (ret != 1)
		drm_dbg_kms(&i915->drm, "failed to %s sink power state\n",
			    mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
3466
3467 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
3468                                  enum port port, enum pipe *pipe)
3469 {
3470         enum pipe p;
3471
3472         for_each_pipe(dev_priv, p) {
3473                 u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p));
3474
3475                 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
3476                         *pipe = p;
3477                         return true;
3478                 }
3479         }
3480
3481         drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n",
3482                     port_name(port));
3483
3484         /* must initialize pipe to something for the asserts */
3485         *pipe = PIPE_A;
3486
3487         return false;
3488 }
3489
3490 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
3491                            i915_reg_t dp_reg, enum port port,
3492                            enum pipe *pipe)
3493 {
3494         bool ret;
3495         u32 val;
3496
3497         val = intel_de_read(dev_priv, dp_reg);
3498
3499         ret = val & DP_PORT_EN;
3500
3501         /* asserts want to know the pipe even if the port is disabled */
3502         if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3503                 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
3504         else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3505                 ret &= cpt_dp_port_selected(dev_priv, port, pipe);
3506         else if (IS_CHERRYVIEW(dev_priv))
3507                 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
3508         else
3509                 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
3510
3511         return ret;
3512 }
3513
3514 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
3515                                   enum pipe *pipe)
3516 {
3517         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3518         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3519         intel_wakeref_t wakeref;
3520         bool ret;
3521
3522         wakeref = intel_display_power_get_if_enabled(dev_priv,
3523                                                      encoder->power_domain);
3524         if (!wakeref)
3525                 return false;
3526
3527         ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
3528                                     encoder->port, pipe);
3529
3530         intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
3531
3532         return ret;
3533 }
3534
/*
 * Encoder ->get_config hook: read back the DP port state (sync polarity,
 * audio, color range, lane count, link clock, M/N values) from the
 * hardware into the crtc state for state checker comparison.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	u32 tmp, flags = 0;
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);

	if (encoder->type == INTEL_OUTPUT_EDP)
		pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
	else
		pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);

	tmp = intel_de_read(dev_priv, intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* On CPT the sync polarity lives in TRANS_DP_CTL, not the port reg */
	if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp = intel_de_read(dev_priv,
					     TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->hw.adjusted_mode.flags |= flags;

	if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A link rate comes from the eDP PLL frequency select */
	if (port == PORT_A) {
		if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	pipe_config->hw.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(pipe_config->port_clock,
					 &pipe_config->dp_m_n);

	if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		drm_dbg_kms(&dev_priv->drm,
			    "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			    pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
	}
}
3620
/*
 * Common DP disable: stop audio, then power the panel down in order
 * (backlight off, sink to D3, panel power off) while holding VDD.
 */
static void intel_disable_dp(struct intel_atomic_state *state,
			     struct intel_encoder *encoder,
			     const struct intel_crtc_state *old_crtc_state,
			     const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	/* Force a fresh link training on the next enable */
	intel_dp->link_trained = false;

	if (old_crtc_state->has_audio)
		intel_audio_codec_disable(encoder,
					  old_crtc_state, old_conn_state);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(old_conn_state);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);
}
3641
/* g4x encoder ->disable hook: just the common DP disable sequence. */
static void g4x_disable_dp(struct intel_atomic_state *state,
			   struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}
3649
/* VLV/CHV encoder ->disable hook: just the common DP disable sequence. */
static void vlv_disable_dp(struct intel_atomic_state *state,
			   struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}
3657
/*
 * g4x encoder ->post_disable hook: take the link down after the pipe
 * has been disabled, and shut off the eDP PLL on port A.
 */
static void g4x_post_disable_dp(struct intel_atomic_state *state,
				struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;

	/*
	 * Bspec does not list a specific disable sequence for g4x DP.
	 * Follow the ilk+ sequence (disable pipe before the port) for
	 * g4x DP as it does not suffer from underruns like the normal
	 * g4x modeset sequence (disable pipe after the port).
	 */
	intel_dp_link_down(encoder, old_crtc_state);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ilk_edp_pll_off(intel_dp, old_crtc_state);
}
3678
/* VLV encoder ->post_disable hook: take the link down after the pipe. */
static void vlv_post_disable_dp(struct intel_atomic_state *state,
				struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	intel_dp_link_down(encoder, old_crtc_state);
}
3686
/*
 * CHV encoder ->post_disable hook: take the link down, then put the
 * PHY data lanes into reset under the DPIO lock.
 */
static void chv_post_disable_dp(struct intel_atomic_state *state,
				struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	intel_dp_link_down(encoder, old_crtc_state);

	vlv_dpio_get(dev_priv);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, old_crtc_state, true);

	vlv_dpio_put(dev_priv);
}
3703
3704 static void
3705 cpt_set_link_train(struct intel_dp *intel_dp,
3706                    u8 dp_train_pat)
3707 {
3708         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3709         u32 *DP = &intel_dp->DP;
3710
3711         *DP &= ~DP_LINK_TRAIN_MASK_CPT;
3712
3713         switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3714         case DP_TRAINING_PATTERN_DISABLE:
3715                 *DP |= DP_LINK_TRAIN_OFF_CPT;
3716                 break;
3717         case DP_TRAINING_PATTERN_1:
3718                 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
3719                 break;
3720         case DP_TRAINING_PATTERN_2:
3721                 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3722                 break;
3723         case DP_TRAINING_PATTERN_3:
3724                 drm_dbg_kms(&dev_priv->drm,
3725                             "TPS3 not supported, using TPS2 instead\n");
3726                 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3727                 break;
3728         }
3729
3730         intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
3731         intel_de_posting_read(dev_priv, intel_dp->output_reg);
3732 }
3733
3734 static void
3735 g4x_set_link_train(struct intel_dp *intel_dp,
3736                    u8 dp_train_pat)
3737 {
3738         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3739         u32 *DP = &intel_dp->DP;
3740
3741         *DP &= ~DP_LINK_TRAIN_MASK;
3742
3743         switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3744         case DP_TRAINING_PATTERN_DISABLE:
3745                 *DP |= DP_LINK_TRAIN_OFF;
3746                 break;
3747         case DP_TRAINING_PATTERN_1:
3748                 *DP |= DP_LINK_TRAIN_PAT_1;
3749                 break;
3750         case DP_TRAINING_PATTERN_2:
3751                 *DP |= DP_LINK_TRAIN_PAT_2;
3752                 break;
3753         case DP_TRAINING_PATTERN_3:
3754                 drm_dbg_kms(&dev_priv->drm,
3755                             "TPS3 not supported, using TPS2 instead\n");
3756                 *DP |= DP_LINK_TRAIN_PAT_2;
3757                 break;
3758         }
3759
3760         intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
3761         intel_de_posting_read(dev_priv, intel_dp->output_reg);
3762 }
3763
/*
 * Enable the DP port with training pattern 1 selected. Note the
 * two-step register programming required on VLV/CHV (see below).
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* enable with pattern 1 (as per spec) */

	intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (old_crtc_state->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
3786
/*
 * Common DP enable: bring up the port and panel under the pps lock,
 * wait for the PHY on VLV/CHV, wake the sink, train the link, and
 * finally enable audio if the crtc state requests it.
 */
static void intel_enable_dp(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *pipe_config,
			    const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
	enum pipe pipe = crtc->pipe;
	intel_wakeref_t wakeref;

	/* The port must still be disabled at this point */
	if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
		return;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_init_panel_power_sequencer(encoder, pipe_config);

		intel_dp_enable_port(intel_dp, pipe_config);

		/* Panel power-up needs VDD held across the sequence */
		edp_panel_vdd_on(intel_dp);
		edp_panel_on(intel_dp);
		edp_panel_vdd_off(intel_dp, true);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		unsigned int lane_mask = 0x0;

		if (IS_CHERRYVIEW(dev_priv))
			lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (pipe_config->has_audio) {
		drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n",
			pipe_name(pipe));
		intel_audio_codec_enable(encoder, pipe_config, conn_state);
	}
}
3833
/* g4x encoder ->enable() hook: enable the port/link, then the backlight. */
static void g4x_enable_dp(struct intel_atomic_state *state,
			  struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_enable_dp(state, encoder, pipe_config, conn_state);
	intel_edp_backlight_on(pipe_config, conn_state);
}
3842
/*
 * VLV/CHV encoder ->enable() hook. The port itself is already enabled
 * from the pre_enable hooks (see vlv_pre_enable_dp/chv_pre_enable_dp),
 * so only the backlight remains to be turned on here.
 */
static void vlv_enable_dp(struct intel_atomic_state *state,
			  struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_edp_backlight_on(pipe_config, conn_state);
}
3850
/*
 * g4x encoder ->pre_enable() hook: latch the port register configuration
 * and, for CPU eDP (port A), turn on the eDP PLL before the port is
 * enabled.
 */
static void g4x_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;

	intel_dp_prepare(encoder, pipe_config);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ilk_edp_pll_on(intel_dp, pipe_config);
}
3865
/*
 * Disconnect the panel power sequencer currently bound to this port:
 * sync off any pending VDD, clear the sequencer's port select, and mark
 * pps_pipe invalid. Must only be called while the port is not active.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);

	/* Only pipes A and B have a power sequencer on VLV/CHV */
	if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
		return;

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);
	intel_de_write(dev_priv, pp_on_reg, 0);
	intel_de_posting_read(dev_priv, pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
3898
/*
 * Take the given pipe's power sequencer away from whichever DP encoder
 * currently has it bound, so it can be reassigned to a new port.
 * Caller must hold pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		/* stealing from an actively driven port indicates a bug */
		drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
			 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
			 pipe_name(pipe), encoder->base.base.id,
			 encoder->base.name);

		if (intel_dp->pps_pipe != pipe)
			continue;

		drm_dbg_kms(&dev_priv->drm,
			    "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
			    pipe_name(pipe), encoder->base.base.id,
			    encoder->base.name);

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
3926
/*
 * Bind the power sequencer of the pipe this encoder is about to drive:
 * detach any sequencer previously used by this port, steal the target
 * pipe's sequencer from other ports, and (for eDP only) initialize the
 * sequencer state and registers. Caller must hold pps_mutex.
 */
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);

	if (intel_dp->pps_pipe != INVALID_PIPE &&
	    intel_dp->pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously make sure to turn off vdd there while
		 * we still have control of it.
		 */
		vlv_detach_power_sequencer(intel_dp);
	}

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev_priv, crtc->pipe);

	intel_dp->active_pipe = crtc->pipe;

	/* only eDP actually uses the panel power sequencer */
	if (!intel_dp_is_edp(intel_dp))
		return;

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
		    encoder->base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
}
3971
/*
 * VLV encoder ->pre_enable() hook: program the PHY for this encoder,
 * then enable the port and train the link.
 */
static void vlv_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	vlv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(state, encoder, pipe_config, conn_state);
}
3981
/*
 * VLV encoder ->pre_pll_enable() hook: latch the port register
 * configuration and prepare the PHY before the PLL is enabled.
 */
static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state,
				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	vlv_phy_pre_pll_enable(encoder, pipe_config);
}
3991
/*
 * CHV encoder ->pre_enable() hook: program the PHY, enable the port and
 * train the link, then drop the CL2 powerdown override.
 */
static void chv_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	chv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(state, encoder, pipe_config, conn_state);

	/* Second common lane will stay alive on its own now */
	chv_phy_release_cl2_override(encoder);
}
4004
/*
 * CHV encoder ->pre_pll_enable() hook: latch the port register
 * configuration and prepare the PHY before the PLL is enabled.
 */
static void chv_dp_pre_pll_enable(struct intel_atomic_state *state,
				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	chv_phy_pre_pll_enable(encoder, pipe_config);
}
4014
/* CHV encoder ->post_pll_disable() hook: PHY cleanup after PLL disable. */
static void chv_dp_post_pll_disable(struct intel_atomic_state *state,
				    struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state)
{
	chv_phy_post_pll_disable(encoder, old_crtc_state);
}
4022
4023 /*
4024  * Fetch AUX CH registers 0x202 - 0x207 which contain
4025  * link status information
4026  */
/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information.
 *
 * Returns true only when the full DP_LINK_STATUS_SIZE block was read.
 */
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
{
	return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
				DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
4033
/* ->voltage_max() hook for platforms capped at voltage swing level 2 */
static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp)
{
	return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
4038
/* ->voltage_max() hook for platforms capped at voltage swing level 3 */
static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp)
{
	return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
}
4043
/* ->preemph_max() hook for platforms capped at pre-emphasis level 2 */
static u8 intel_dp_pre_empemph_max_2(struct intel_dp *intel_dp)
{
	return DP_TRAIN_PRE_EMPH_LEVEL_2;
}
4048
/* ->preemph_max() hook for platforms capped at pre-emphasis level 3 */
static u8 intel_dp_pre_empemph_max_3(struct intel_dp *intel_dp)
{
	return DP_TRAIN_PRE_EMPH_LEVEL_3;
}
4053
/*
 * Apply the requested voltage swing / pre-emphasis from train_set[0] to
 * the VLV PHY. The demph/preemph/uniqtranscale values are per-level PHY
 * tuning constants (presumably from platform tuning tables — do not
 * tweak individually). Unsupported swing/pre-emphasis combinations are
 * silently ignored.
 */
static void vlv_set_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	u8 train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	default:
		return;
	}

	vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
				 uniqtranscale_reg_value, 0);
}
4137
/*
 * Apply the requested voltage swing / pre-emphasis from train_set[0] to
 * the CHV PHY. deemph/margin are per-level PHY tuning constants; the
 * maximum-swing case (level 3, no pre-emphasis) additionally enables the
 * unique transition scale. Unsupported combinations are silently ignored.
 */
static void chv_set_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u32 deemph_reg_value, margin_reg_value;
	bool uniq_trans_scale = false;
	u8 train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			uniq_trans_scale = true;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	default:
		return;
	}

	chv_set_phy_signal_level(encoder, deemph_reg_value,
				 margin_reg_value, uniq_trans_scale);
}
4218
4219 static u32 g4x_signal_levels(u8 train_set)
4220 {
4221         u32 signal_levels = 0;
4222
4223         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
4224         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4225         default:
4226                 signal_levels |= DP_VOLTAGE_0_4;
4227                 break;
4228         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
4229                 signal_levels |= DP_VOLTAGE_0_6;
4230                 break;
4231         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
4232                 signal_levels |= DP_VOLTAGE_0_8;
4233                 break;
4234         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
4235                 signal_levels |= DP_VOLTAGE_1_2;
4236                 break;
4237         }
4238         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
4239         case DP_TRAIN_PRE_EMPH_LEVEL_0:
4240         default:
4241                 signal_levels |= DP_PRE_EMPHASIS_0;
4242                 break;
4243         case DP_TRAIN_PRE_EMPH_LEVEL_1:
4244                 signal_levels |= DP_PRE_EMPHASIS_3_5;
4245                 break;
4246         case DP_TRAIN_PRE_EMPH_LEVEL_2:
4247                 signal_levels |= DP_PRE_EMPHASIS_6;
4248                 break;
4249         case DP_TRAIN_PRE_EMPH_LEVEL_3:
4250                 signal_levels |= DP_PRE_EMPHASIS_9_5;
4251                 break;
4252         }
4253         return signal_levels;
4254 }
4255
/*
 * ->set_signal_levels() hook for g4x: translate train_set[0] into port
 * register bits and write them to the DP output register.
 */
static void
g4x_set_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	u32 signal_levels;

	signal_levels = g4x_signal_levels(train_set);

	drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
		    signal_levels);

	/* replace only the swing/pre-emphasis bits in the cached value */
	intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK);
	intel_dp->DP |= signal_levels;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
4274
/* SNB CPU eDP voltage swing and pre-emphasis control */
static u32 snb_cpu_edp_signal_levels(u8 train_set)
{
	u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					DP_TRAIN_PRE_EMPHASIS_MASK);

	/*
	 * SNB only exposes a handful of swing/pre-emphasis combinations;
	 * several DPCD requests map onto the same hardware setting, and
	 * anything unsupported falls back to the lowest level.
	 */
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
4302
/*
 * ->set_signal_levels() hook for SNB CPU eDP: translate train_set[0]
 * into port register bits and write them to the DP output register.
 */
static void
snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	u32 signal_levels;

	signal_levels = snb_cpu_edp_signal_levels(train_set);

	drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
		    signal_levels);

	/* replace only the swing/pre-emphasis bits in the cached value */
	intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	intel_dp->DP |= signal_levels;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
4321
/* IVB CPU eDP voltage swing and pre-emphasis control */
static u32 ivb_cpu_edp_signal_levels(u8 train_set)
{
	u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					DP_TRAIN_PRE_EMPHASIS_MASK);

	/* map the DPCD swing/pre-emphasis request onto IVB register values */
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		/* note: the fallback is the 500MV level, unlike SNB/g4x */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
4353
/*
 * ->set_signal_levels() hook for IVB CPU eDP: translate train_set[0]
 * into port register bits and write them to the DP output register.
 */
static void
ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	u32 signal_levels;

	signal_levels = ivb_cpu_edp_signal_levels(train_set);

	drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
		    signal_levels);

	/* replace only the swing/pre-emphasis bits in the cached value */
	intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	intel_dp->DP |= signal_levels;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
4372
/*
 * Log the requested voltage swing / pre-emphasis levels from
 * train_set[0] and apply them via the platform-specific
 * ->set_signal_levels() hook.
 */
void intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];

	drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
		    train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
		    train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
	drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
		    (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
		    DP_TRAIN_PRE_EMPHASIS_SHIFT,
		    train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
		    " (max)" : "");

	intel_dp->set_signal_levels(intel_dp);
}
4389
/*
 * Program the given link training pattern on the source side via the
 * platform ->set_link_train() hook; logs the TPS number when a real
 * training pattern (not idle/off) is requested.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       u8 dp_train_pat)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);

	if (dp_train_pat & train_pat_mask)
		drm_dbg_kms(&dev_priv->drm,
			    "Using DP training pattern TPS%d\n",
			    dp_train_pat & train_pat_mask);

	intel_dp->set_link_train(intel_dp, dp_train_pat);
}
4404
/*
 * Switch the port to idle pattern transmission, if the platform
 * provides a ->set_idle_link_train() hook (optional).
 */
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	if (intel_dp->set_idle_link_train)
		intel_dp->set_idle_link_train(intel_dp);
}
4410
/*
 * Bring the DP link and port down: switch the port to the idle training
 * pattern, disable the port (and audio), apply the IBX transcoder-A
 * workaround if needed, then wait out the panel power-down delay.
 */
static void
intel_dp_link_down(struct intel_encoder *encoder,
		   const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	enum port port = encoder->port;
	u32 DP = intel_dp->DP;

	/* The port is expected to still be enabled at this point. */
	if (drm_WARN_ON(&dev_priv->drm,
			(intel_de_read(dev_priv, intel_dp->output_reg) &
			 DP_PORT_EN) == 0))
		return;

	drm_dbg_kms(&dev_priv->drm, "\n");

	/* IVB port A and CPT PCH ports use the CPT link train field layout */
	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	/* now actually disable the port and its audio output */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
			DP_LINK_TRAIN_PAT_1;
		intel_de_write(dev_priv, intel_dp->output_reg, DP);
		intel_de_posting_read(dev_priv, intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		intel_de_write(dev_priv, intel_dp->output_reg, DP);
		intel_de_posting_read(dev_priv, intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	msleep(intel_dp->panel_power_down_delay);

	/* keep the cached register value in sync with the hardware */
	intel_dp->DP = DP;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t wakeref;

		with_pps_lock(intel_dp, wakeref)
			intel_dp->active_pipe = INVALID_PIPE;
	}
}
4483
/*
 * Read DP_DPRX_FEATURE_ENUMERATION_LIST and report whether the sink
 * supports VSC SDP extension for colorimetry. Returns false if the
 * DPCD read fails.
 */
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	u8 dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}
4493
/*
 * Cache the sink's DSC capability DPCD block and FEC capability byte,
 * clearing both first so that sinks without DSC don't leave stale
 * values behind. Read failures are logged but otherwise leave the
 * cleared (zero) defaults in place.
 */
static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
	 */
	memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

	/* Clear fec_capable to avoid using stale values */
	intel_dp->fec_capable = 0;

	/* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
	    intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
				     intel_dp->dsc_dpcd,
				     sizeof(intel_dp->dsc_dpcd)) < 0)
			drm_err(&i915->drm,
				"Failed to read DPCD register 0x%x\n",
				DP_DSC_SUPPORT);

		drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n",
			    (int)sizeof(intel_dp->dsc_dpcd),
			    intel_dp->dsc_dpcd);

		/* FEC is supported only on DP 1.4 */
		if (!intel_dp_is_edp(intel_dp) &&
		    drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
				      &intel_dp->fec_capable) < 0)
			drm_err(&i915->drm,
				"Failed to read FEC DPCD register\n");

		drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
			    intel_dp->fec_capable);
	}
}
4532
4533 static bool
4534 intel_edp_init_dpcd(struct intel_dp *intel_dp)
4535 {
4536         struct drm_i915_private *dev_priv =
4537                 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
4538
4539         /* this function is meant to be called only once */
4540         drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);
4541
4542         if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
4543                 return false;
4544
4545         drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
4546                          drm_dp_is_branch(intel_dp->dpcd));
4547
4548         /*
4549          * Read the eDP display control registers.
4550          *
4551          * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
4552          * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
4553          * set, but require eDP 1.4+ detection (e.g. for supported link rates
4554          * method). The display control registers should read zero if they're
4555          * not supported anyway.
4556          */
4557         if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
4558                              intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
4559                              sizeof(intel_dp->edp_dpcd))
4560                 drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
4561                             (int)sizeof(intel_dp->edp_dpcd),
4562                             intel_dp->edp_dpcd);
4563
4564         /*
4565          * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
4566          * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
4567          */
4568         intel_psr_init_dpcd(intel_dp);
4569
4570         /* Read the eDP 1.4+ supported link rates. */
4571         if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4572                 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
4573                 int i;
4574
4575                 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
4576                                 sink_rates, sizeof(sink_rates));
4577
4578                 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4579                         int val = le16_to_cpu(sink_rates[i]);
4580
4581                         if (val == 0)
4582                                 break;
4583
4584                         /* Value read multiplied by 200kHz gives the per-lane
4585                          * link rate in kHz. The source rates are, however,
4586                          * stored in terms of LS_Clk kHz. The full conversion
4587                          * back to symbols is
4588                          * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
4589                          */
4590                         intel_dp->sink_rates[i] = (val * 200) / 10;
4591                 }
4592                 intel_dp->num_sink_rates = i;
4593         }
4594
4595         /*
4596          * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
4597          * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
4598          */
4599         if (intel_dp->num_sink_rates)
4600                 intel_dp->use_rate_select = true;
4601         else
4602                 intel_dp_set_sink_rates(intel_dp);
4603
4604         intel_dp_set_common_rates(intel_dp);
4605
4606         /* Read the eDP DSC DPCD registers */
4607         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4608                 intel_dp_get_dsc_sink_cap(intel_dp);
4609
4610         return true;
4611 }
4612
4613 static bool
4614 intel_dp_has_sink_count(struct intel_dp *intel_dp)
4615 {
4616         if (!intel_dp->attached_connector)
4617                 return false;
4618
4619         return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
4620                                           intel_dp->dpcd,
4621                                           &intel_dp->desc);
4622 }
4623
4624 static bool
4625 intel_dp_get_dpcd(struct intel_dp *intel_dp)
4626 {
4627         int ret;
4628
4629         if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd))
4630                 return false;
4631
4632         /*
4633          * Don't clobber cached eDP rates. Also skip re-reading
4634          * the OUI/ID since we know it won't change.
4635          */
4636         if (!intel_dp_is_edp(intel_dp)) {
4637                 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
4638                                  drm_dp_is_branch(intel_dp->dpcd));
4639
4640                 intel_dp_set_sink_rates(intel_dp);
4641                 intel_dp_set_common_rates(intel_dp);
4642         }
4643
4644         if (intel_dp_has_sink_count(intel_dp)) {
4645                 ret = drm_dp_read_sink_count(&intel_dp->aux);
4646                 if (ret < 0)
4647                         return false;
4648
4649                 /*
4650                  * Sink count can change between short pulse hpd hence
4651                  * a member variable in intel_dp will track any changes
4652                  * between short pulse interrupts.
4653                  */
4654                 intel_dp->sink_count = ret;
4655
4656                 /*
4657                  * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
4658                  * a dongle is present but no display. Unless we require to know
4659                  * if a dongle is present or not, we don't need to update
4660                  * downstream port information. So, an early return here saves
4661                  * time from performing other operations which are not required.
4662                  */
4663                 if (!intel_dp->sink_count)
4664                         return false;
4665         }
4666
4667         return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd,
4668                                            intel_dp->downstream_ports) == 0;
4669 }
4670
4671 static bool
4672 intel_dp_can_mst(struct intel_dp *intel_dp)
4673 {
4674         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4675
4676         return i915->params.enable_dp_mst &&
4677                 intel_dp->can_mst &&
4678                 drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
4679 }
4680
4681 static void
4682 intel_dp_configure_mst(struct intel_dp *intel_dp)
4683 {
4684         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4685         struct intel_encoder *encoder =
4686                 &dp_to_dig_port(intel_dp)->base;
4687         bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
4688
4689         drm_dbg_kms(&i915->drm,
4690                     "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
4691                     encoder->base.base.id, encoder->base.name,
4692                     yesno(intel_dp->can_mst), yesno(sink_can_mst),
4693                     yesno(i915->params.enable_dp_mst));
4694
4695         if (!intel_dp->can_mst)
4696                 return;
4697
4698         intel_dp->is_mst = sink_can_mst &&
4699                 i915->params.enable_dp_mst;
4700
4701         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4702                                         intel_dp->is_mst);
4703 }
4704
4705 static bool
4706 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4707 {
4708         return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
4709                                 sink_irq_vector, DP_DPRX_ESI_LEN) ==
4710                 DP_DPRX_ESI_LEN;
4711 }
4712
4713 bool
4714 intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
4715                        const struct drm_connector_state *conn_state)
4716 {
4717         /*
4718          * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
4719          * of Color Encoding Format and Content Color Gamut], in order to
4720          * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
4721          */
4722         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
4723                 return true;
4724
4725         switch (conn_state->colorspace) {
4726         case DRM_MODE_COLORIMETRY_SYCC_601:
4727         case DRM_MODE_COLORIMETRY_OPYCC_601:
4728         case DRM_MODE_COLORIMETRY_BT2020_YCC:
4729         case DRM_MODE_COLORIMETRY_BT2020_RGB:
4730         case DRM_MODE_COLORIMETRY_BT2020_CYCC:
4731                 return true;
4732         default:
4733                 break;
4734         }
4735
4736         return false;
4737 }
4738
/*
 * Pack @vsc into the wire-format DP VSC SDP in @sdp.
 *
 * Returns the packed length (always sizeof(struct dp_sdp)) on success, or
 * -ENOSPC if @size cannot hold a complete SDP.
 */
static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
				     struct dp_sdp *sdp, size_t size)
{
	size_t length = sizeof(struct dp_sdp);

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/*
	 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
	 * VSC SDP Header Bytes
	 */
	sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
	sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
	sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
	sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */

	/*
	 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
	 * per DP 1.4a spec.
	 */
	if (vsc->revision != 0x5)
		goto out;

	/* VSC SDP Payload for DB16 through DB18 */
	/* Pixel Encoding and Colorimetry Formats  */
	sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
	sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */

	/* Map the component bit depth to its DB17[3:0] encoding. */
	switch (vsc->bpc) {
	case 6:
		/* 6bpc: 0x0 */
		break;
	case 8:
		sdp->db[17] = 0x1; /* DB17[3:0] */
		break;
	case 10:
		sdp->db[17] = 0x2;
		break;
	case 12:
		sdp->db[17] = 0x3;
		break;
	case 16:
		sdp->db[17] = 0x4;
		break;
	default:
		/* Unknown bpc: leave DB17[3:0] at 0 (6bpc) and warn once. */
		MISSING_CASE(vsc->bpc);
		break;
	}
	/* Dynamic Range and Component Bit Depth */
	if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
		sdp->db[17] |= 0x80;  /* DB17[7] */

	/* Content Type */
	sdp->db[18] = vsc->content_type & 0x7;

out:
	return length;
}
4800
/*
 * Pack @drm_infoframe (HDR static metadata, CTA-861-G DRM InfoFrame) into a
 * DP INFOFRAME SDP in @sdp.
 *
 * Returns the number of bytes to hand to write_infoframe() — SDP header plus
 * two CTA header bytes plus HDMI_DRM_INFOFRAME_SIZE — or -ENOSPC when @size
 * is too small or the intermediate infoframe packing fails.
 */
static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
					 struct dp_sdp *sdp,
					 size_t size)
{
	size_t length = sizeof(struct dp_sdp);
	const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
	unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
	ssize_t len;

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/* Pack the HDMI-style infoframe first, then re-wrap it as a DP SDP. */
	len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
	if (len < 0) {
		DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
		return -ENOSPC;
	}

	if (len != infoframe_size) {
		DRM_DEBUG_KMS("wrong static hdr metadata size\n");
		return -ENOSPC;
	}

	/*
	 * Set up the infoframe sdp packet for HDR static metadata.
	 * Prepare VSC Header for SU as per DP 1.4a spec,
	 * Table 2-100 and Table 2-101
	 */

	/* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
	sdp->sdp_header.HB0 = 0;
	/*
	 * Packet Type 80h + Non-audio INFOFRAME Type value
	 * HDMI_INFOFRAME_TYPE_DRM: 0x87
	 * - 80h + Non-audio INFOFRAME Type value
	 * - InfoFrame Type: 0x07
	 *    [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
	 */
	sdp->sdp_header.HB1 = drm_infoframe->type;
	/*
	 * Least Significant Eight Bits of (Data Byte Count - 1)
	 * infoframe_size - 1 = 0x1D, i.e. Data Byte Count = 30
	 */
	sdp->sdp_header.HB2 = 0x1D;
	/* INFOFRAME SDP Version Number */
	sdp->sdp_header.HB3 = (0x13 << 2);
	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	sdp->db[0] = drm_infoframe->version;
	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	sdp->db[1] = drm_infoframe->length;
	/*
	 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
	 * HDMI_INFOFRAME_HEADER_SIZE
	 */
	BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
	memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
	       HDMI_DRM_INFOFRAME_SIZE);

	/*
	 * Size of DP infoframe sdp packet for HDR static metadata consists of
	 * - DP SDP Header(struct dp_sdp_header): 4 bytes
	 * - Two Data Blocks: 2 bytes
	 *    CTA Header Byte2 (INFOFRAME Version Number)
	 *    CTA Header Byte3 (Length of INFOFRAME)
	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
	 *
	 * Prior to GEN11's GMP register size is identical to DP HDR static metadata
	 * infoframe size. But GEN11+ has larger than that size, write_infoframe
	 * will pad rest of the size.
	 */
	return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
}
4876
4877 static void intel_write_dp_sdp(struct intel_encoder *encoder,
4878                                const struct intel_crtc_state *crtc_state,
4879                                unsigned int type)
4880 {
4881         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
4882         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4883         struct dp_sdp sdp = {};
4884         ssize_t len;
4885
4886         if ((crtc_state->infoframes.enable &
4887              intel_hdmi_infoframe_enable(type)) == 0)
4888                 return;
4889
4890         switch (type) {
4891         case DP_SDP_VSC:
4892                 len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
4893                                             sizeof(sdp));
4894                 break;
4895         case HDMI_PACKET_TYPE_GAMUT_METADATA:
4896                 len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm,
4897                                                                &sdp, sizeof(sdp));
4898                 break;
4899         default:
4900                 MISSING_CASE(type);
4901                 return;
4902         }
4903
4904         if (drm_WARN_ON(&dev_priv->drm, len < 0))
4905                 return;
4906
4907         dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
4908 }
4909
4910 void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
4911                             const struct intel_crtc_state *crtc_state,
4912                             struct drm_dp_vsc_sdp *vsc)
4913 {
4914         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
4915         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4916         struct dp_sdp sdp = {};
4917         ssize_t len;
4918
4919         len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));
4920
4921         if (drm_WARN_ON(&dev_priv->drm, len < 0))
4922                 return;
4923
4924         dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
4925                                         &sdp, len);
4926 }
4927
4928 void intel_dp_set_infoframes(struct intel_encoder *encoder,
4929                              bool enable,
4930                              const struct intel_crtc_state *crtc_state,
4931                              const struct drm_connector_state *conn_state)
4932 {
4933         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4934         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4935         i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
4936         u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
4937                          VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
4938                          VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
4939         u32 val = intel_de_read(dev_priv, reg);
4940
4941         /* TODO: Add DSC case (DIP_ENABLE_PPS) */
4942         /* When PSR is enabled, this routine doesn't disable VSC DIP */
4943         if (intel_psr_enabled(intel_dp))
4944                 val &= ~dip_enable;
4945         else
4946                 val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW);
4947
4948         if (!enable) {
4949                 intel_de_write(dev_priv, reg, val);
4950                 intel_de_posting_read(dev_priv, reg);
4951                 return;
4952         }
4953
4954         intel_de_write(dev_priv, reg, val);
4955         intel_de_posting_read(dev_priv, reg);
4956
4957         /* When PSR is enabled, VSC SDP is handled by PSR routine */
4958         if (!intel_psr_enabled(intel_dp))
4959                 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
4960
4961         intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
4962 }
4963
4964 static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
4965                                    const void *buffer, size_t size)
4966 {
4967         const struct dp_sdp *sdp = buffer;
4968
4969         if (size < sizeof(struct dp_sdp))
4970                 return -EINVAL;
4971
4972         memset(vsc, 0, size);
4973
4974         if (sdp->sdp_header.HB0 != 0)
4975                 return -EINVAL;
4976
4977         if (sdp->sdp_header.HB1 != DP_SDP_VSC)
4978                 return -EINVAL;
4979
4980         vsc->sdp_type = sdp->sdp_header.HB1;
4981         vsc->revision = sdp->sdp_header.HB2;
4982         vsc->length = sdp->sdp_header.HB3;
4983
4984         if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
4985             (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
4986                 /*
4987                  * - HB2 = 0x2, HB3 = 0x8
4988                  *   VSC SDP supporting 3D stereo + PSR
4989                  * - HB2 = 0x4, HB3 = 0xe
4990                  *   VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
4991                  *   first scan line of the SU region (applies to eDP v1.4b
4992                  *   and higher).
4993                  */
4994                 return 0;
4995         } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
4996                 /*
4997                  * - HB2 = 0x5, HB3 = 0x13
4998                  *   VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
4999                  *   Format.
5000                  */
5001                 vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
5002                 vsc->colorimetry = sdp->db[16] & 0xf;
5003                 vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;
5004
5005                 switch (sdp->db[17] & 0x7) {
5006                 case 0x0:
5007                         vsc->bpc = 6;
5008                         break;
5009                 case 0x1:
5010                         vsc->bpc = 8;
5011                         break;
5012                 case 0x2:
5013                         vsc->bpc = 10;
5014                         break;
5015                 case 0x3:
5016                         vsc->bpc = 12;
5017                         break;
5018                 case 0x4:
5019                         vsc->bpc = 16;
5020                         break;
5021                 default:
5022                         MISSING_CASE(sdp->db[17] & 0x7);
5023                         return -EINVAL;
5024                 }
5025
5026                 vsc->content_type = sdp->db[18] & 0x7;
5027         } else {
5028                 return -EINVAL;
5029         }
5030
5031         return 0;
5032 }
5033
5034 static int
5035 intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
5036                                            const void *buffer, size_t size)
5037 {
5038         int ret;
5039
5040         const struct dp_sdp *sdp = buffer;
5041
5042         if (size < sizeof(struct dp_sdp))
5043                 return -EINVAL;
5044
5045         if (sdp->sdp_header.HB0 != 0)
5046                 return -EINVAL;
5047
5048         if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
5049                 return -EINVAL;
5050
5051         /*
5052          * Least Significant Eight Bits of (Data Byte Count – 1)
5053          * 1Dh (i.e., Data Byte Count = 30 bytes).
5054          */
5055         if (sdp->sdp_header.HB2 != 0x1D)
5056                 return -EINVAL;
5057
5058         /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */
5059         if ((sdp->sdp_header.HB3 & 0x3) != 0)
5060                 return -EINVAL;
5061
5062         /* INFOFRAME SDP Version Number */
5063         if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
5064                 return -EINVAL;
5065
5066         /* CTA Header Byte 2 (INFOFRAME Version Number) */
5067         if (sdp->db[0] != 1)
5068                 return -EINVAL;
5069
5070         /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
5071         if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
5072                 return -EINVAL;
5073
5074         ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
5075                                              HDMI_DRM_INFOFRAME_SIZE);
5076
5077         return ret;
5078 }
5079
5080 static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
5081                                   struct intel_crtc_state *crtc_state,
5082                                   struct drm_dp_vsc_sdp *vsc)
5083 {
5084         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
5085         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
5086         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5087         unsigned int type = DP_SDP_VSC;
5088         struct dp_sdp sdp = {};
5089         int ret;
5090
5091         /* When PSR is enabled, VSC SDP is handled by PSR routine */
5092         if (intel_psr_enabled(intel_dp))
5093                 return;
5094
5095         if ((crtc_state->infoframes.enable &
5096              intel_hdmi_infoframe_enable(type)) == 0)
5097                 return;
5098
5099         dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
5100
5101         ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
5102
5103         if (ret)
5104                 drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
5105 }
5106
5107 static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
5108                                                      struct intel_crtc_state *crtc_state,
5109                                                      struct hdmi_drm_infoframe *drm_infoframe)
5110 {
5111         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
5112         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5113         unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
5114         struct dp_sdp sdp = {};
5115         int ret;
5116
5117         if ((crtc_state->infoframes.enable &
5118             intel_hdmi_infoframe_enable(type)) == 0)
5119                 return;
5120
5121         dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
5122                                  sizeof(sdp));
5123
5124         ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
5125                                                          sizeof(sdp));
5126
5127         if (ret)
5128                 drm_dbg_kms(&dev_priv->drm,
5129                             "Failed to unpack DP HDR Metadata Infoframe SDP\n");
5130 }
5131
5132 void intel_read_dp_sdp(struct intel_encoder *encoder,
5133                        struct intel_crtc_state *crtc_state,
5134                        unsigned int type)
5135 {
5136         if (encoder->type != INTEL_OUTPUT_DDI)
5137                 return;
5138
5139         switch (type) {
5140         case DP_SDP_VSC:
5141                 intel_read_dp_vsc_sdp(encoder, crtc_state,
5142                                       &crtc_state->infoframes.vsc);
5143                 break;
5144         case HDMI_PACKET_TYPE_GAMUT_METADATA:
5145                 intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
5146                                                          &crtc_state->infoframes.drm.drm);
5147                 break;
5148         default:
5149                 MISSING_CASE(type);
5150                 break;
5151         }
5152 }
5153
5154 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
5155 {
5156         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5157         int status = 0;
5158         int test_link_rate;
5159         u8 test_lane_count, test_link_bw;
5160         /* (DP CTS 1.2)
5161          * 4.3.1.11
5162          */
5163         /* Read the TEST_LANE_COUNT and TEST_LINK_RTAE fields (DP CTS 3.1.4) */
5164         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
5165                                    &test_lane_count);
5166
5167         if (status <= 0) {
5168                 drm_dbg_kms(&i915->drm, "Lane count read failed\n");
5169                 return DP_TEST_NAK;
5170         }
5171         test_lane_count &= DP_MAX_LANE_COUNT_MASK;
5172
5173         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
5174                                    &test_link_bw);
5175         if (status <= 0) {
5176                 drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
5177                 return DP_TEST_NAK;
5178         }
5179         test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
5180
5181         /* Validate the requested link rate and lane count */
5182         if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
5183                                         test_lane_count))
5184                 return DP_TEST_NAK;
5185
5186         intel_dp->compliance.test_lane_count = test_lane_count;
5187         intel_dp->compliance.test_link_rate = test_link_rate;
5188
5189         return DP_TEST_ACK;
5190 }
5191
/*
 * Handle the DP compliance TEST_PATTERN request (DP CTS 3.1.5).
 *
 * Reads the requested pattern, width/height and pixel format from the DPCD
 * and caches them in intel_dp->compliance.test_data. Only the color-ramp
 * pattern in RGB with VESA dynamic range and 6 or 8 bpc is accepted.
 *
 * Returns DP_TEST_ACK when the request was accepted and cached,
 * DP_TEST_NAK on a read failure or an unsupported configuration.
 */
static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_pattern;
	u8 test_misc;
	__be16 h_width, v_height;
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	/* Only the color-ramp pattern is supported. */
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	/* Requested geometry is stored big-endian in the DPCD. */
	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "V Height read failed\n");
		return DP_TEST_NAK;
	}

	/* TEST_MISC0 carries color format, dynamic range and bit depth. */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return DP_TEST_ACK;
}
5253
/*
 * Handle the DP compliance TEST_EDID_READ request (DP CTS 1.2 Core r1.1,
 * tests 4.2.2.3 - 4.2.2.6).
 *
 * On a successful EDID read, writes the checksum of the last EDID block to
 * DP_TEST_EDID_CHECKSUM and selects the preferred test resolution; on a
 * failed or corrupt read, selects the failsafe resolution instead.
 *
 * Returns DP_TEST_ACK, OR'd with DP_TEST_EDID_CHECKSUM_WRITE when the
 * checksum was written.
 */
static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			drm_dbg_kms(&i915->drm,
				    "EDID read had %d NACKs, %d DEFERs\n",
				    intel_dp->aux.i2c_nack_count,
				    intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		/* 'extensions' counts blocks after the base EDID block, so
		 * advancing by it lands on the last block read. */
		block += intel_connector->detect_edid->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			drm_dbg_kms(&i915->drm,
				    "Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return test_result;
}
5300
5301 static u8 intel_dp_prepare_phytest(struct intel_dp *intel_dp)
5302 {
5303         struct drm_dp_phy_test_params *data =
5304                 &intel_dp->compliance.test_data.phytest;
5305
5306         if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
5307                 DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n");
5308                 return DP_TEST_NAK;
5309         }
5310
5311         /*
5312          * link_mst is set to false to avoid executing mst related code
5313          * during compliance testing.
5314          */
5315         intel_dp->link_mst = false;
5316
5317         return DP_TEST_ACK;
5318 }
5319
5320 static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp)
5321 {
5322         struct drm_i915_private *dev_priv =
5323                         to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
5324         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5325         struct drm_dp_phy_test_params *data =
5326                         &intel_dp->compliance.test_data.phytest;
5327         struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
5328         enum pipe pipe = crtc->pipe;
5329         u32 pattern_val;
5330
5331         switch (data->phy_pattern) {
5332         case DP_PHY_TEST_PATTERN_NONE:
5333                 DRM_DEBUG_KMS("Disable Phy Test Pattern\n");
5334                 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
5335                 break;
5336         case DP_PHY_TEST_PATTERN_D10_2:
5337                 DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n");
5338                 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
5339                                DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
5340                 break;
5341         case DP_PHY_TEST_PATTERN_ERROR_COUNT:
5342                 DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n");
5343                 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
5344                                DDI_DP_COMP_CTL_ENABLE |
5345                                DDI_DP_COMP_CTL_SCRAMBLED_0);
5346                 break;
5347         case DP_PHY_TEST_PATTERN_PRBS7:
5348                 DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n");
5349                 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
5350                                DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
5351                 break;
5352         case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
5353                 /*
5354                  * FIXME: Ideally pattern should come from DPCD 0x250. As
5355                  * current firmware of DPR-100 could not set it, so hardcoding
5356                  * now for complaince test.
5357                  */
5358                 DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
5359                 pattern_val = 0x3e0f83e0;
5360                 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
5361                 pattern_val = 0x0f83e0f8;
5362                 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
5363                 pattern_val = 0x0000f83e;
5364                 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
5365                 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
5366                                DDI_DP_COMP_CTL_ENABLE |
5367                                DDI_DP_COMP_CTL_CUSTOM80);
5368                 break;
5369         case DP_PHY_TEST_PATTERN_CP2520:
5370                 /*
5371                  * FIXME: Ideally pattern should come from DPCD 0x24A. As
5372                  * current firmware of DPR-100 could not set it, so hardcoding
5373                  * now for complaince test.
5374                  */
5375                 DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n");
5376                 pattern_val = 0xFB;
5377                 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
5378                                DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
5379                                pattern_val);
5380                 break;
5381         default:
5382                 WARN(1, "Invalid Phy Test Pattern\n");
5383         }
5384 }
5385
/*
 * Quiesce the output before a PHY compliance pattern is programmed:
 * read-modify-write clears the pipe, DDI function and DP transport enables
 * while preserving all other register bits, so they can be restored by
 * intel_dp_autotest_phy_ddi_enable().
 *
 * NOTE(review): uses TGL_* register layouts; presumably only reached on
 * platforms where those apply - confirm against the callers.
 */
static void
intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;

	/* Snapshot the current register values so only the enables change. */
	trans_ddi_func_ctl_value = intel_de_read(dev_priv,
						 TRANS_DDI_FUNC_CTL(pipe));
	trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
	dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));

	/* Clear the enables and the DDI port selection. */
	trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
				      TGL_TRANS_DDI_PORT_MASK);
	trans_conf_value &= ~PIPECONF_ENABLE;
	dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;

	/* Disable pipe first, then the DDI function, then the DP transport. */
	intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
		       trans_ddi_func_ctl_value);
	intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
}
5411
/*
 * Re-enable the pipe, DP transport and DDI function after a PHY compliance
 * pattern has been programmed; counterpart of
 * intel_dp_autotest_phy_ddi_disable().
 *
 * NOTE(review): @lane_cnt is currently unused - TODO confirm whether the
 * lane count needs to be (re)programmed here.
 */
static void
intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, uint8_t lane_cnt)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = dig_port->base.port;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;

	/* Read-modify-write: only the enables/port selection change. */
	trans_ddi_func_ctl_value = intel_de_read(dev_priv,
						 TRANS_DDI_FUNC_CTL(pipe));
	trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
	dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));

	trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
				    TGL_TRANS_DDI_SELECT_PORT(port);
	trans_conf_value |= PIPECONF_ENABLE;
	dp_tp_ctl_value |= DP_TP_CTL_ENABLE;

	/* Enable pipe, then the DP transport, then the DDI function. */
	intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
	intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
		       trans_ddi_func_ctl_value);
}
5438
5439 void intel_dp_process_phy_request(struct intel_dp *intel_dp)
5440 {
5441         struct drm_dp_phy_test_params *data =
5442                 &intel_dp->compliance.test_data.phytest;
5443         u8 link_status[DP_LINK_STATUS_SIZE];
5444
5445         if (!intel_dp_get_link_status(intel_dp, link_status)) {
5446                 DRM_DEBUG_KMS("failed to get link status\n");
5447                 return;
5448         }
5449
5450         /* retrieve vswing & pre-emphasis setting */
5451         intel_dp_get_adjust_train(intel_dp, link_status);
5452
5453         intel_dp_autotest_phy_ddi_disable(intel_dp);
5454
5455         intel_dp_set_signal_levels(intel_dp);
5456
5457         intel_dp_phy_pattern_update(intel_dp);
5458
5459         intel_dp_autotest_phy_ddi_enable(intel_dp, data->num_lanes);
5460
5461         drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
5462                                     link_status[DP_DPCD_REV]);
5463 }
5464
5465 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
5466 {
5467         u8 test_result;
5468
5469         test_result = intel_dp_prepare_phytest(intel_dp);
5470         if (test_result != DP_TEST_ACK)
5471                 DRM_ERROR("Phy test preparation failed\n");
5472
5473         intel_dp_process_phy_request(intel_dp);
5474
5475         return test_result;
5476 }
5477
5478 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
5479 {
5480         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5481         u8 response = DP_TEST_NAK;
5482         u8 request = 0;
5483         int status;
5484
5485         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
5486         if (status <= 0) {
5487                 drm_dbg_kms(&i915->drm,
5488                             "Could not read test request from sink\n");
5489                 goto update_status;
5490         }
5491
5492         switch (request) {
5493         case DP_TEST_LINK_TRAINING:
5494                 drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
5495                 response = intel_dp_autotest_link_training(intel_dp);
5496                 break;
5497         case DP_TEST_LINK_VIDEO_PATTERN:
5498                 drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
5499                 response = intel_dp_autotest_video_pattern(intel_dp);
5500                 break;
5501         case DP_TEST_LINK_EDID_READ:
5502                 drm_dbg_kms(&i915->drm, "EDID test requested\n");
5503                 response = intel_dp_autotest_edid(intel_dp);
5504                 break;
5505         case DP_TEST_LINK_PHY_TEST_PATTERN:
5506                 drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
5507                 response = intel_dp_autotest_phy_pattern(intel_dp);
5508                 break;
5509         default:
5510                 drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
5511                             request);
5512                 break;
5513         }
5514
5515         if (response & DP_TEST_ACK)
5516                 intel_dp->compliance.test_type = request;
5517
5518 update_status:
5519         status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
5520         if (status <= 0)
5521                 drm_dbg_kms(&i915->drm,
5522                             "Could not write test response to sink\n");
5523 }
5524
5525 /**
5526  * intel_dp_check_mst_status - service any pending MST interrupts, check link status
5527  * @intel_dp: Intel DP struct
5528  *
5529  * Read any pending MST interrupts, call MST core to handle these and ack the
5530  * interrupts. Check if the main and AUX link state is ok.
5531  *
5532  * Returns:
5533  * - %true if pending interrupts were serviced (or no interrupts were
5534  *   pending) w/o detecting an error condition.
5535  * - %false if an error condition - like AUX failure or a loss of link - is
5536  *   detected, which needs servicing from the hotplug work.
5537  */
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	bool link_ok = true;

	drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);

	/* Keep reading ESI events until the MST core has nothing to handle. */
	for (;;) {
		u8 esi[DP_DPRX_ESI_LEN] = {};
		bool handled;
		int retry;

		if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
			drm_dbg_kms(&i915->drm,
				    "failed to get ESI - device may have failed\n");
			link_ok = false;

			break;
		}

		/* check link status - esi[10] = 0x200c */
		if (intel_dp->active_mst_links > 0 && link_ok &&
		    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "channel EQ not ok, retraining\n");
			link_ok = false;
		}

		drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);

		drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
		if (!handled)
			break;

		/* Ack the serviced event bits, retrying the AUX write a few times. */
		for (retry = 0; retry < 3; retry++) {
			int wret;

			wret = drm_dp_dpcd_write(&intel_dp->aux,
						 DP_SINK_COUNT_ESI+1,
						 &esi[1], 3);
			if (wret == 3)
				break;
		}
	}

	return link_ok;
}
5586
/*
 * Decide whether the link needs retraining. Returns false when a retrain
 * is pointless or unsafe (never trained, PSR owns the link, AUX failed,
 * stale link params), true when CR/channel EQ is no longer ok.
 */
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link_trained)
		return false;

	/*
	 * While the PSR source HW is enabled it controls the main link on
	 * its own, enabling and disabling it, so a retrain attempt could
	 * find the link down or end up mixing training patterns with frame
	 * data and fail. Exiting PSR retrains the link anyway, fixing up
	 * any link status errors.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (!intel_dp_get_link_status(intel_dp, link_status))
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	/* Retrain if Channel EQ or CR not ok */
	return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}
5620
5621 static bool intel_dp_has_connector(struct intel_dp *intel_dp,
5622                                    const struct drm_connector_state *conn_state)
5623 {
5624         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5625         struct intel_encoder *encoder;
5626         enum pipe pipe;
5627
5628         if (!conn_state->best_encoder)
5629                 return false;
5630
5631         /* SST */
5632         encoder = &dp_to_dig_port(intel_dp)->base;
5633         if (conn_state->best_encoder == &encoder->base)
5634                 return true;
5635
5636         /* MST */
5637         for_each_pipe(i915, pipe) {
5638                 encoder = &intel_dp->mst_encoders[pipe]->base;
5639                 if (conn_state->best_encoder == &encoder->base)
5640                         return true;
5641         }
5642
5643         return false;
5644 }
5645
/*
 * Work out which crtcs are affected by a pending link retrain and take
 * their locks via @ctx. Returns 0 or a modeset-lock error (-EDEADLK
 * requires the caller to back off and retry). On success *crtc_mask holds
 * the affected crtcs, or 0 if no retrain turns out to be needed.
 */
static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
				      struct drm_modeset_acquire_ctx *ctx,
				      u32 *crtc_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*crtc_mask = 0;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		/* Only connectors driven by this DP port (SST or MST). */
		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		/* ctx-managed lock: stays held until the ctx is dropped. */
		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		/* Skip crtcs that still have an atomic commit in flight. */
		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*crtc_mask |= drm_crtc_mask(&crtc->base);
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Re-check: the link state may have changed while taking locks. */
	if (!intel_dp_needs_link_retrain(intel_dp))
		*crtc_mask = 0;

	return ret;
}
5698
5699 static bool intel_dp_is_connected(struct intel_dp *intel_dp)
5700 {
5701         struct intel_connector *connector = intel_dp->attached_connector;
5702
5703         return connector->base.status == connector_status_connected ||
5704                 intel_dp->is_mst;
5705 }
5706
/*
 * Retrain the DP link if intel_dp_needs_link_retrain() says so.
 *
 * Takes connection_mutex and the affected crtc locks through @ctx; may
 * return -EDEADLK, in which case the caller must back off and retry.
 * FIFO underrun reporting is suppressed across the retrain, since the
 * link briefly dropping out is expected.
 */
int intel_dp_retrain_link(struct intel_encoder *encoder,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u32 crtc_mask;
	int ret;

	if (!intel_dp_is_connected(intel_dp))
		return 0;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask);
	if (ret)
		return ret;

	/* Nothing to do: no active crtc needs the retrain. */
	if (crtc_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Suppress underruns caused by re-training */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), false);
	}

	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Keep underrun reporting disabled until things are stable */
		intel_wait_for_vblank(dev_priv, crtc->pipe);

		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), true);
	}

	return 0;
}
5763
5764 /*
 * If the display is now connected, check the link status;
 * there have been known issues of link loss triggering
5767  * long pulse.
5768  *
5769  * Some sinks (eg. ASUS PB287Q) seem to perform some
5770  * weird HPD ping pong during modesets. So we can apparently
5771  * end up with HPD going low during a modeset, and then
5772  * going back up soon after. And once that happens we must
5773  * retrain the link to get a picture. That's in case no
5774  * userspace component reacted to intermittent HPD dip.
5775  */
static enum intel_hotplug_state
intel_dp_hotplug(struct intel_encoder *encoder,
		 struct intel_connector *connector)
{
	struct drm_modeset_acquire_ctx ctx;
	enum intel_hotplug_state state;
	int ret;

	/* Run the generic long-pulse handling first. */
	state = intel_encoder_hotplug(encoder, connector);

	drm_modeset_acquire_init(&ctx, 0);

	/* Retrain the link, backing off and retrying on lock contention. */
	for (;;) {
		ret = intel_dp_retrain_link(encoder, &ctx);

		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			continue;
		}

		break;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_WARN(encoder->base.dev, ret,
		 "Acquiring modeset locks failed with %i\n", ret);

	/*
	 * Keeping it consistent with intel_ddi_hotplug() and
	 * intel_hdmi_hotplug().
	 */
	if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
		state = INTEL_HOTPLUG_RETRY;

	return state;
}
5813
/*
 * Read and ack the sink's DEVICE_SERVICE_IRQ_VECTOR, then dispatch the
 * pending IRQ sources (automated test requests, HDCP CP_IRQ).
 */
static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val;

	/* The service IRQ vector only exists on DPCD 1.1+ sinks. */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	/* Write the vector back to ack the pending IRQs before servicing. */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}
5837
5838 /*
5839  * According to DP spec
5840  * 5.1.2:
5841  *  1. Read DPCD
5842  *  2. Configure link according to Receiver Capabilities
5843  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
5844  *  4. Check link status on receipt of hot-plug interrupt
5845  *
5846  * intel_dp_short_pulse -  handles short pulse interrupts
5847  * when full detection is not required.
5848  * Returns %true if short pulse is handled and full detection
5849  * is NOT required and %false otherwise.
5850  */
5851 static bool
5852 intel_dp_short_pulse(struct intel_dp *intel_dp)
5853 {
5854         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
5855         u8 old_sink_count = intel_dp->sink_count;
5856         bool ret;
5857
5858         /*
5859          * Clearing compliance test variables to allow capturing
5860          * of values for next automated test request.
5861          */
5862         memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
5863
5864         /*
5865          * Now read the DPCD to see if it's actually running
5866          * If the current value of sink count doesn't match with
5867          * the value that was stored earlier or dpcd read failed
5868          * we need to do full detection
5869          */
5870         ret = intel_dp_get_dpcd(intel_dp);
5871
5872         if ((old_sink_count != intel_dp->sink_count) || !ret) {
5873                 /* No need to proceed if we are going to do full detect */
5874                 return false;
5875         }
5876
5877         intel_dp_check_service_irq(intel_dp);
5878
5879         /* Handle CEC interrupts, if any */
5880         drm_dp_cec_irq(&intel_dp->aux);
5881
5882         /* defer to the hotplug work for link retraining if needed */
5883         if (intel_dp_needs_link_retrain(intel_dp))
5884                 return false;
5885
5886         intel_psr_short_pulse(intel_dp);
5887
5888         if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
5889                 drm_dbg_kms(&dev_priv->drm,
5890                             "Link Training Compliance Test requested\n");
5891                 /* Send a Hotplug Uevent to userspace to start modeset */
5892                 drm_kms_helper_hotplug_event(&dev_priv->drm);
5893         }
5894
5895         return true;
5896 }
5897
5898 /* XXX this is probably wrong for multiple downstream ports */
5899 static enum drm_connector_status
5900 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
5901 {
5902         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5903         struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
5904         u8 *dpcd = intel_dp->dpcd;
5905         u8 type;
5906
5907         if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
5908                 return connector_status_connected;
5909
5910         if (lspcon->active)
5911                 lspcon_resume(lspcon);
5912
5913         if (!intel_dp_get_dpcd(intel_dp))
5914                 return connector_status_disconnected;
5915
5916         /* if there's no downstream port, we're done */
5917         if (!drm_dp_is_branch(dpcd))
5918                 return connector_status_connected;
5919
5920         /* If we're HPD-aware, SINK_COUNT changes dynamically */
5921         if (intel_dp_has_sink_count(intel_dp) &&
5922             intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
5923                 return intel_dp->sink_count ?
5924                 connector_status_connected : connector_status_disconnected;
5925         }
5926
5927         if (intel_dp_can_mst(intel_dp))
5928                 return connector_status_connected;
5929
5930         /* If no HPD, poke DDC gently */
5931         if (drm_probe_ddc(&intel_dp->aux.ddc))
5932                 return connector_status_connected;
5933
5934         /* Well we tried, say unknown for unreliable port types */
5935         if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
5936                 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
5937                 if (type == DP_DS_PORT_TYPE_VGA ||
5938                     type == DP_DS_PORT_TYPE_NON_EDID)
5939                         return connector_status_unknown;
5940         } else {
5941                 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
5942                         DP_DWN_STRM_PORT_TYPE_MASK;
5943                 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
5944                     type == DP_DWN_STRM_PORT_TYPE_OTHER)
5945                         return connector_status_unknown;
5946         }
5947
5948         /* Anything else is out of spec, warn and ignore */
5949         drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
5950         return connector_status_disconnected;
5951 }
5952
/* eDP panels are integrated and not hot-unpluggable: always connected. */
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	return connector_status_connected;
}
5958
/* Check the PCH hotplug live-state bit (SDEISR) for this encoder's HPD pin. */
static bool ibx_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];

	return intel_de_read(dev_priv, SDEISR) & bit;
}
5966
5967 static bool g4x_digital_port_connected(struct intel_encoder *encoder)
5968 {
5969         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5970         u32 bit;
5971
5972         switch (encoder->hpd_pin) {
5973         case HPD_PORT_B:
5974                 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
5975                 break;
5976         case HPD_PORT_C:
5977                 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
5978                 break;
5979         case HPD_PORT_D:
5980                 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
5981                 break;
5982         default:
5983                 MISSING_CASE(encoder->hpd_pin);
5984                 return false;
5985         }
5986
5987         return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
5988 }
5989
5990 static bool gm45_digital_port_connected(struct intel_encoder *encoder)
5991 {
5992         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5993         u32 bit;
5994
5995         switch (encoder->hpd_pin) {
5996         case HPD_PORT_B:
5997                 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
5998                 break;
5999         case HPD_PORT_C:
6000                 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
6001                 break;
6002         case HPD_PORT_D:
6003                 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
6004                 break;
6005         default:
6006                 MISSING_CASE(encoder->hpd_pin);
6007                 return false;
6008         }
6009
6010         return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
6011 }
6012
/* Check the CPU display hotplug live-state bit (DEISR) for this HPD pin. */
static bool ilk_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];

	return intel_de_read(dev_priv, DEISR) & bit;
}
6020
6021 /*
6022  * intel_digital_port_connected - is the specified port connected?
6023  * @encoder: intel_encoder
6024  *
6025  * In cases where there's a connector physically connected but it can't be used
6026  * by our hardware we also return false, since the rest of the driver should
6027  * pretty much treat the port as disconnected. This is relevant for type-C
6028  * (starting on ICL) where there's ownership involved.
6029  *
6030  * Return %true if port is connected, %false otherwise.
6031  */
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	bool is_connected = false;
	intel_wakeref_t wakeref;

	/* Hold a display power wakeref around the platform-specific probe. */
	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		is_connected = dig_port->connected(encoder);

	return is_connected;
}
6044
6045 static struct edid *
6046 intel_dp_get_edid(struct intel_dp *intel_dp)
6047 {
6048         struct intel_connector *intel_connector = intel_dp->attached_connector;
6049
6050         /* use cached edid if we have one */
6051         if (intel_connector->edid) {
6052                 /* invalid edid */
6053                 if (IS_ERR(intel_connector->edid))
6054                         return NULL;
6055
6056                 return drm_edid_duplicate(intel_connector->edid);
6057         } else
6058                 return drm_get_edid(&intel_connector->base,
6059                                     &intel_dp->aux.ddc);
6060 }
6061
6062 static void
6063 intel_dp_set_edid(struct intel_dp *intel_dp)
6064 {
6065         struct intel_connector *intel_connector = intel_dp->attached_connector;
6066         struct edid *edid;
6067
6068         intel_dp_unset_edid(intel_dp);
6069         edid = intel_dp_get_edid(intel_dp);
6070         intel_connector->detect_edid = edid;
6071
6072         if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
6073                 intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
6074                 intel_dp->has_audio = drm_detect_monitor_audio(edid);
6075         }
6076
6077         drm_dp_cec_set_edid(&intel_dp->aux, edid);
6078         intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
6079 }
6080
6081 static void
6082 intel_dp_unset_edid(struct intel_dp *intel_dp)
6083 {
6084         struct intel_connector *intel_connector = intel_dp->attached_connector;
6085
6086         drm_dp_cec_unset_edid(&intel_dp->aux);
6087         kfree(intel_connector->detect_edid);
6088         intel_connector->detect_edid = NULL;
6089
6090         intel_dp->has_hdmi_sink = false;
6091         intel_dp->has_audio = false;
6092         intel_dp->edid_quirks = 0;
6093 }
6094
/*
 * Connector ->detect_ctx hook: probe the DP/eDP sink, refresh its
 * capabilities (DPCD, DSC caps, EDID), (re)configure MST and link
 * parameters, and attach the subconnector property.
 *
 * Returns a connector_status_* value, or a negative errno if link
 * retraining needed to back off for lock contention.
 */
static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	/* Caller (probe helper) must hold the connection mutex. */
	drm_WARN_ON(&dev_priv->drm,
		    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	if (!INTEL_DISPLAY_ENABLED(dev_priv))
		return connector_status_disconnected;

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		/* Sink is gone: drop cached compliance/DSC state. */
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
		memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

		/* Tear down the MST topology if one was active. */
		if (intel_dp->is_mst) {
			drm_dbg_kms(&dev_priv->drm,
				    "MST device may have disappeared %d vs %d\n",
				    intel_dp->is_mst,
				    intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
	if (INTEL_GEN(dev_priv) >= 11)
		intel_dp_get_dsc_sink_cap(intel_dp);

	intel_dp_configure_mst(intel_dp);

	/*
	 * TODO: Reset link params when switching to MST mode, until MST
	 * supports link training fallback params.
	 */
	if (intel_dp->reset_link_params || intel_dp->is_mst) {
		/* Initial max link lane count */
		intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);

		/* Initial max link rate */
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);

		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Some external monitors do not signal loss of link synchronization
	 * with an IRQ_HPD, so force a link status check.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		int ret;

		/* May return -EDEADLK for ctx backoff; propagate as-is. */
		ret = intel_dp_retrain_link(encoder, ctx);
		if (ret)
			return ret;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	/* eDP is always "connected" even when the EDID read failed. */
	if (intel_dp_is_edp(intel_dp) ||
	    to_intel_connector(connector)->detect_edid)
		status = connector_status_connected;

	intel_dp_check_service_irq(intel_dp);

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	/*
	 * Make sure the refs for power wells enabled during detect are
	 * dropped to avoid a new detect cycle triggered by HPD polling.
	 */
	intel_display_power_flush_work(dev_priv);

	if (!intel_dp_is_edp(intel_dp))
		drm_dp_set_subconnector_property(connector,
						 status,
						 intel_dp->dpcd,
						 intel_dp->downstream_ports);
	return status;
}
6215
/*
 * Connector ->force hook: re-read the EDID without a full detect cycle.
 * Only does the re-read for connectors already marked connected; grabs
 * an explicit AUX power reference since no detect power refs are held.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(dig_port);
	intel_wakeref_t wakeref;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	wakeref = intel_display_power_get(dev_priv, aux_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, aux_domain, wakeref);
}
6240
6241 static int intel_dp_get_modes(struct drm_connector *connector)
6242 {
6243         struct intel_connector *intel_connector = to_intel_connector(connector);
6244         struct edid *edid;
6245
6246         edid = intel_connector->detect_edid;
6247         if (edid) {
6248                 int ret = intel_connector_update_modes(connector, edid);
6249                 if (ret)
6250                         return ret;
6251         }
6252
6253         /* if eDP has no EDID, fall back to fixed mode */
6254         if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) &&
6255             intel_connector->panel.fixed_mode) {
6256                 struct drm_display_mode *mode;
6257
6258                 mode = drm_mode_duplicate(connector->dev,
6259                                           intel_connector->panel.fixed_mode);
6260                 if (mode) {
6261                         drm_mode_probed_add(connector, mode);
6262                         return 1;
6263                 }
6264         }
6265
6266         return 0;
6267 }
6268
/*
 * Connector ->late_register hook: register the base connector, then
 * expose the AUX channel as an i2c bus and, on success, hook up CEC.
 */
static int
intel_dp_connector_register(struct drm_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
		    intel_dp->aux.name, connector->kdev->kobj.name);

	/* Parent the AUX device under the connector's sysfs device. */
	intel_dp->aux.dev = connector->kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	/* CEC is only registered when AUX registration succeeded. */
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux, connector);
	return ret;
}
6289
/*
 * Connector ->early_unregister hook: tear down in reverse order of
 * intel_dp_connector_register() (CEC, then AUX, then the connector).
 */
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}
6299
/*
 * Flush/cancel all deferred work for a DP encoder prior to teardown:
 * MST encoders, the delayed VDD-off work (forcing VDD off synchronously),
 * the eDP reboot notifier and the AUX channel.
 */
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
	struct intel_dp *intel_dp = &dig_port->dp;

	intel_dp_mst_encoder_cleanup(dig_port);
	if (intel_dp_is_edp(intel_dp)) {
		intel_wakeref_t wakeref;

		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		with_pps_lock(intel_dp, wakeref)
			edp_panel_vdd_off_sync(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}

	intel_dp_aux_fini(intel_dp);
}
6325
/* Encoder ->destroy hook: flush pending work, then free the port. */
static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	intel_dp_encoder_flush_work(encoder);

	drm_encoder_cleanup(encoder);
	kfree(enc_to_dig_port(to_intel_encoder(encoder)));
}
6333
/*
 * System-suspend hook for DP encoders. Only eDP needs work: force the
 * delayed VDD-off to complete so the panel is powered down for suspend.
 */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);
}
6350
/*
 * Bring software VDD tracking in sync with hardware state at boot/resume.
 * Must be called with pps_mutex held.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "VDD left on by BIOS, adjusting state tracking\n");
	intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));

	edp_panel_vdd_schedule_off(intel_dp);
}
6373
6374 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
6375 {
6376         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6377         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
6378         enum pipe pipe;
6379
6380         if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
6381                                   encoder->port, &pipe))
6382                 return pipe;
6383
6384         return INVALID_PIPE;
6385 }
6386
/*
 * Encoder ->reset hook, called at boot/resume: resync cached register
 * state, resume LSPCON if present, and (on VLV/CHV and eDP) re-sanitize
 * the power sequencer state the BIOS may have left behind.
 */
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
	intel_wakeref_t wakeref;

	if (!HAS_DDI(dev_priv))
		intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);

	if (lspcon->active)
		lspcon_resume(lspcon);

	/* Link params must be re-read on the next detect. */
	intel_dp->reset_link_params = true;

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			intel_dp->active_pipe = vlv_active_pipe(intel_dp);

		if (intel_dp_is_edp(intel_dp)) {
			/*
			 * Reinit the power sequencer, in case BIOS did
			 * something nasty with it.
			 */
			intel_dp_pps_init(intel_dp);
			intel_edp_panel_vdd_sanitize(intel_dp);
		}
	}
}
6420
/*
 * Pull every connector belonging to the given tile group (and the planes
 * of their CRTCs) into the atomic state, marking the CRTCs for a full
 * modeset so tiled displays are reprogrammed together.
 *
 * Returns 0 on success or a negative errno (e.g. -EDEADLK for backoff).
 */
static int intel_modeset_tile_group(struct intel_atomic_state *state,
				    int tile_group_id)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	int ret = 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!connector->has_tile ||
		    connector->tile_group->id != tile_group_id)
			continue;

		conn_state = drm_atomic_get_connector_state(&state->base,
							    connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			break;
		}

		crtc = to_intel_crtc(conn_state->crtc);

		/* Connector not bound to any CRTC: nothing to modeset. */
		if (!crtc)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
6462
/*
 * Mark every enabled CRTC whose transcoder is in the given bitmask for a
 * full modeset, adding its connectors and planes to the atomic state.
 * Used to keep port-synced transcoders modeset together.
 *
 * Returns 0 on success or a negative errno (e.g. -EDEADLK for backoff).
 */
static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	if (transcoders == 0)
		return 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.enable)
			continue;

		if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			return ret;

		/* Each transcoder maps to at most one CRTC. */
		transcoders &= ~BIT(crtc_state->cpu_transcoder);
	}

	/* All requested transcoders should have been found above. */
	drm_WARN_ON(&dev_priv->drm, transcoders != 0);

	return 0;
}
6502
/*
 * If the connector's old CRTC was part of a port-sync group (as master
 * or slave), pull all transcoders of that group into the atomic state
 * so the whole group is modeset together.
 */
static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
				      struct drm_connector *connector)
{
	const struct drm_connector_state *old_conn_state =
		drm_atomic_get_old_connector_state(&state->base, connector);
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	u8 transcoders;

	crtc = to_intel_crtc(old_conn_state->crtc);
	if (!crtc)
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return 0;

	/* Slaves of this CRTC, plus its master if it has one. */
	transcoders = old_crtc_state->sync_mode_slaves_mask;
	if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
		transcoders |= BIT(old_crtc_state->master_transcoder);

	return intel_modeset_affected_transcoders(state,
						  transcoders);
}
6528
/*
 * Connector ->atomic_check hook: run the generic digital-connector
 * checks, then (gen9+) expand the state to cover tile groups and
 * port-synced CRTCs when this connector needs a modeset.
 */
static int intel_dp_connector_atomic_check(struct drm_connector *conn,
					   struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(conn->dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	int ret;

	ret = intel_digital_connector_atomic_check(conn, &state->base);
	if (ret)
		return ret;

	/*
	 * We don't enable port sync on BDW due to missing w/as and
	 * due to not having adjusted the modeset sequence appropriately.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 0;

	if (!intel_connector_needs_modeset(state, conn))
		return 0;

	if (conn->has_tile) {
		ret = intel_modeset_tile_group(state, conn->tile_group->id);
		if (ret)
			return ret;
	}

	return intel_modeset_synced_crtcs(state, conn);
}
6558
/*
 * DP connector vtable. NOTE(review): no .detect hook here — probing
 * appears to go through .detect_ctx in the helper funcs below; confirm
 * with the probe-helper documentation.
 */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};

/* Probe helpers: detect (with acquire ctx), mode enumeration/validation. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_dp_connector_atomic_check,
};

/* DP encoder vtable: reset at boot/resume, destroy at teardown. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
6582
6583 static bool intel_edp_have_power(struct intel_dp *intel_dp)
6584 {
6585         intel_wakeref_t wakeref;
6586         bool have_power = false;
6587
6588         with_pps_lock(intel_dp, wakeref) {
6589                 have_power = edp_have_panel_power(intel_dp) &&
6590                                                   edp_have_panel_vdd(intel_dp);
6591         }
6592
6593         return have_power;
6594 }
6595
/*
 * HPD pulse handler for a DP digital port.
 *
 * Returns IRQ_HANDLED when the pulse was fully dealt with here, or
 * IRQ_NONE when the caller needs to schedule full connector detection
 * (long pulse, or a short pulse that indicated a problem).
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_dp *intel_dp = &dig_port->dp;

	if (dig_port->base.type == INTEL_OUTPUT_EDP &&
	    (long_hpd || !intel_edp_have_power(intel_dp))) {
		/*
		 * vdd off can generate a long/short pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		drm_dbg_kms(&i915->drm,
			    "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
			    long_hpd ? "long" : "short",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return IRQ_HANDLED;
	}

	drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    long_hpd ? "long" : "short");

	/* Long pulse: sink may have changed, force full re-detection. */
	if (long_hpd) {
		intel_dp->reset_link_params = true;
		return IRQ_NONE;
	}

	/* Short pulse: service the sink IRQ (MST or SST path). */
	if (intel_dp->is_mst) {
		if (!intel_dp_check_mst_status(intel_dp))
			return IRQ_NONE;
	} else if (!intel_dp_short_pulse(intel_dp)) {
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
6637
6638 /* check the VBT to see whether the eDP is on another port */
6639 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
6640 {
6641         /*
6642          * eDP not supported on g4x. so bail out early just
6643          * for a bit extra safety in case the VBT is bonkers.
6644          */
6645         if (INTEL_GEN(dev_priv) < 5)
6646                 return false;
6647
6648         if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
6649                 return true;
6650
6651         return intel_bios_is_port_edp(dev_priv, port);
6652 }
6653
/*
 * Attach the standard DRM properties supported by this DP connector:
 * subconnector (external DP only), audio, broadcast RGB, max bpc,
 * colorspace, HDR metadata (GLK/gen11+) and, for eDP, scaling mode.
 */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	if (!intel_dp_is_edp(intel_dp))
		drm_connector_attach_dp_subconnector_property(connector);

	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	intel_attach_broadcast_rgb_property(connector);
	/* GMCH platforms top out at 10 bpc, gen5+ at 12 bpc. */
	if (HAS_GMCH(dev_priv))
		drm_connector_attach_max_bpc_property(connector, 6, 10);
	else if (INTEL_GEN(dev_priv) >= 5)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	intel_attach_colorspace_property(connector);

	if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
		drm_object_attach_property(&connector->base,
					   connector->dev->mode_config.hdr_output_metadata_property,
					   0);

	if (intel_dp_is_edp(intel_dp)) {
		u32 allowed_scalers;

		allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
		if (!HAS_GMCH(dev_priv))
			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);

		drm_connector_attach_scaling_mode_property(connector, allowed_scalers);

		connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;

	}
}
6692
/*
 * Initialize the timestamps used to enforce panel power sequencing
 * delays, as if power/backlight were just turned off.
 */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->panel_power_off_time = ktime_get_boottime();
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
6699
/*
 * Read the current panel power sequencing delays out of the PPS
 * registers into @seq. All values end up in the hw's 100 usec units,
 * with t11_t12 scaled from the register's 100 msec granularity.
 */
static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, pp_ctl;
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	pp_ctl = ilk_get_pp_control(intel_dp);

	/* Ensure PPS is unlocked */
	if (!HAS_DDI(dev_priv))
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);

	pp_on = intel_de_read(dev_priv, regs.pp_on);
	pp_off = intel_de_read(dev_priv, regs.pp_off);

	/* Pull timing values out of registers */
	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);

	/* BXT+ keeps the power cycle delay in pp_ctrl instead of pp_div. */
	if (i915_mmio_reg_valid(regs.pp_div)) {
		u32 pp_div;

		pp_div = intel_de_read(dev_priv, regs.pp_div);

		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
	} else {
		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
	}
}
6734
/* Debug-log one set of panel power sequencing delays. */
static void
intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
{
	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      state_name,
		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}
6742
/*
 * Cross-check the software copy of the PPS delays against what the
 * hardware registers currently contain, logging any mismatch.
 */
static void
intel_pps_verify_state(struct intel_dp *intel_dp)
{
	struct edp_power_seq hw;
	struct edp_power_seq *sw = &intel_dp->pps_delays;

	intel_pps_readout_hw_state(intel_dp, &hw);

	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
		DRM_ERROR("PPS state mismatch\n");
		intel_pps_dump_state("sw", sw);
		intel_pps_dump_state("hw", &hw);
	}
}
6758
/*
 * Compute the final panel power sequencing delays from the max of the
 * current hardware state and the VBT, falling back to the eDP spec
 * limits when both are unset. Must be called with pps_mutex held; a
 * non-zero t11_t12 marks the delays as already initialized.
 */
static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	intel_pps_readout_hw_state(intel_dp, &cur);

	intel_pps_dump_state("cur", &cur);

	vbt = dev_priv->vbt.edp.pps;
	/* On Toshiba Satellite P50-C-18C system the VBT T12 delay
	 * of 500ms appears to be too short. Occasionally the panel
	 * just fails to power back on. Enforcing a minimum delay of
	 * 1300ms seems sufficient to avoid this problem.
	 */
	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
		drm_dbg_kms(&dev_priv->drm,
			    "Increasing T12 panel delay as per the quirk to %d\n",
			    vbt.t11_t12);
	}
	/* T11_T12 delay is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	vbt.t11_t12 += 100 * 10;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state("vbt", &vbt);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from 100usec hw units to milliseconds, rounding up. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	drm_dbg_kms(&dev_priv->drm,
		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
		    intel_dp->panel_power_up_delay,
		    intel_dp->panel_power_down_delay,
		    intel_dp->panel_power_cycle_delay);

	drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
		    intel_dp->backlight_on_delay,
		    intel_dp->backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;

	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}
6854
6855 static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, port_sel = 0;
	/* rawclk_freq is scaled down by 1000 for the Bspec divisor formula below */
	int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->base.port;
	/* Panel power sequencing delays, previously read out / sanitized */
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_get_registers(intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * edp_panel_vdd_on() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ilk_get_pp_control(intel_dp);

		drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
			 "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			drm_dbg_kms(&dev_priv->drm,
				    "VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		intel_de_write(dev_priv, regs.pp_ctrl, pp);
	}

	/* Pack the power-up (T1+T3, backlight-on T8) and power-down (T9, T10) delays */
	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
		REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		switch (port) {
		case PORT_A:
			port_sel = PANEL_PORT_SELECT_DPA;
			break;
		case PORT_C:
			port_sel = PANEL_PORT_SELECT_DPC;
			break;
		case PORT_D:
			port_sel = PANEL_PORT_SELECT_DPD;
			break;
		default:
			/* eDP is only expected on the ports above here */
			MISSING_CASE(port);
			break;
		}
	}

	pp_on |= port_sel;

	intel_de_write(dev_priv, regs.pp_on, pp_on);
	intel_de_write(dev_priv, regs.pp_off, pp_off);

	/*
	 * Compute the divisor for the pp clock, simply match the Bspec formula.
	 */
	if (i915_mmio_reg_valid(regs.pp_div)) {
		intel_de_write(dev_priv, regs.pp_div,
			       REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
	} else {
		/*
		 * No dedicated PP_DIV register (BXT+): the power cycle delay
		 * lives in a field of PP_CONTROL instead, so RMW that.
		 */
		u32 pp_ctl;

		pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
	}

	/* Read the values back for the debug log so we see what actually stuck */
	drm_dbg_kms(&dev_priv->drm,
		    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		    intel_de_read(dev_priv, regs.pp_on),
		    intel_de_read(dev_priv, regs.pp_off),
		    i915_mmio_reg_valid(regs.pp_div) ?
		    intel_de_read(dev_priv, regs.pp_div) :
		    (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
6951
6952 static void intel_dp_pps_init(struct intel_dp *intel_dp)
6953 {
6954         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6955
6956         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6957                 vlv_initial_power_sequencer_setup(intel_dp);
6958         } else {
6959                 intel_dp_init_panel_power_sequencer(intel_dp);
6960                 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
6961         }
6962 }
6963
6964 /**
6965  * intel_dp_set_drrs_state - program registers for RR switch to take effect
6966  * @dev_priv: i915 device
6967  * @crtc_state: a pointer to the active intel_crtc_state
6968  * @refresh_rate: RR to be programmed
6969  *
6970  * This function gets called when refresh rate (RR) has to be changed from
6971  * one frequency to another. Switches can be between high and low RR
6972  * supported by the panel or to any other RR based on media playback (in
6973  * this case, RR value needs to be passed from user space).
6974  *
 * The caller of this function needs to take a lock on dev_priv->drrs.mutex.
6976  */
static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
                                    const struct intel_crtc_state *crtc_state,
                                    int refresh_rate)
{
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
	/* Default to the high (fixed mode) refresh rate */
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
		return;
	}

	if (!intel_crtc) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS: intel_crtc not initialized\n");
		return;
	}

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
		return;
	}

	/* Requesting the panel's downclock rate selects the low-RR setting */
	if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		drm_dbg_kms(&dev_priv->drm,
			    "DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!crtc_state->hw.active) {
		drm_dbg_kms(&dev_priv->drm,
			    "eDP encoder disabled. CRTC not Active\n");
		return;
	}

	/*
	 * Gen8+ (except CHV): switch RR by selecting between the two
	 * precomputed link M/N value sets (M1/N1 for high RR, M2/N2 for
	 * low RR).
	 */
	if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(crtc_state, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(crtc_state, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			drm_err(&dev_priv->drm,
				"Unsupported refreshrate type\n");
		}
	} else if (INTEL_GEN(dev_priv) > 6) {
		/* Gen7 / VLV / CHV: toggle the RR-mode-switch bit in PIPECONF */
		i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
		u32 val;

		val = intel_de_read(dev_priv, reg);
		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		intel_de_write(dev_priv, reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
		    refresh_rate);
}
7060
7061 static void
7062 intel_edp_drrs_enable_locked(struct intel_dp *intel_dp)
7063 {
7064         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7065
7066         dev_priv->drrs.busy_frontbuffer_bits = 0;
7067         dev_priv->drrs.dp = intel_dp;
7068 }
7069
7070 /**
7071  * intel_edp_drrs_enable - init drrs struct if supported
7072  * @intel_dp: DP struct
7073  * @crtc_state: A pointer to the active crtc state.
7074  *
7075  * Initializes frontbuffer_bits and drrs.dp
7076  */
7077 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
7078                            const struct intel_crtc_state *crtc_state)
7079 {
7080         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7081
7082         if (!crtc_state->has_drrs)
7083                 return;
7084
7085         drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n");
7086
7087         mutex_lock(&dev_priv->drrs.mutex);
7088
7089         if (dev_priv->drrs.dp) {
7090                 drm_warn(&dev_priv->drm, "DRRS already enabled\n");
7091                 goto unlock;
7092         }
7093
7094         intel_edp_drrs_enable_locked(intel_dp);
7095
7096 unlock:
7097         mutex_unlock(&dev_priv->drrs.mutex);
7098 }
7099
7100 static void
7101 intel_edp_drrs_disable_locked(struct intel_dp *intel_dp,
7102                               const struct intel_crtc_state *crtc_state)
7103 {
7104         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7105
7106         if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
7107                 int refresh;
7108
7109                 refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode);
7110                 intel_dp_set_drrs_state(dev_priv, crtc_state, refresh);
7111         }
7112
7113         dev_priv->drrs.dp = NULL;
7114 }
7115
7116 /**
7117  * intel_edp_drrs_disable - Disable DRRS
7118  * @intel_dp: DP struct
7119  * @old_crtc_state: Pointer to old crtc_state.
7120  *
7121  */
7122 void intel_edp_drrs_disable(struct intel_dp *intel_dp,
7123                             const struct intel_crtc_state *old_crtc_state)
7124 {
7125         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7126
7127         if (!old_crtc_state->has_drrs)
7128                 return;
7129
7130         mutex_lock(&dev_priv->drrs.mutex);
7131         if (!dev_priv->drrs.dp) {
7132                 mutex_unlock(&dev_priv->drrs.mutex);
7133                 return;
7134         }
7135
7136         intel_edp_drrs_disable_locked(intel_dp, old_crtc_state);
7137         mutex_unlock(&dev_priv->drrs.mutex);
7138
7139         cancel_delayed_work_sync(&dev_priv->drrs.work);
7140 }
7141
7142 /**
7143  * intel_edp_drrs_update - Update DRRS state
7144  * @intel_dp: Intel DP
7145  * @crtc_state: new CRTC state
7146  *
7147  * This function will update DRRS states, disabling or enabling DRRS when
7148  * executing fastsets. For full modeset, intel_edp_drrs_disable() and
7149  * intel_edp_drrs_enable() should be called instead.
7150  */
7151 void
7152 intel_edp_drrs_update(struct intel_dp *intel_dp,
7153                       const struct intel_crtc_state *crtc_state)
7154 {
7155         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7156
7157         if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
7158                 return;
7159
7160         mutex_lock(&dev_priv->drrs.mutex);
7161
7162         /* New state matches current one? */
7163         if (crtc_state->has_drrs == !!dev_priv->drrs.dp)
7164                 goto unlock;
7165
7166         if (crtc_state->has_drrs)
7167                 intel_edp_drrs_enable_locked(intel_dp);
7168         else
7169                 intel_edp_drrs_disable_locked(intel_dp, crtc_state);
7170
7171 unlock:
7172         mutex_unlock(&dev_priv->drrs.mutex);
7173 }
7174
7175 static void intel_edp_drrs_downclock_work(struct work_struct *work)
7176 {
7177         struct drm_i915_private *dev_priv =
7178                 container_of(work, typeof(*dev_priv), drrs.work.work);
7179         struct intel_dp *intel_dp;
7180
7181         mutex_lock(&dev_priv->drrs.mutex);
7182
7183         intel_dp = dev_priv->drrs.dp;
7184
7185         if (!intel_dp)
7186                 goto unlock;
7187
7188         /*
7189          * The delayed work can race with an invalidate hence we need to
7190          * recheck.
7191          */
7192
7193         if (dev_priv->drrs.busy_frontbuffer_bits)
7194                 goto unlock;
7195
7196         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
7197                 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
7198
7199                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
7200                         drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode));
7201         }
7202
7203 unlock:
7204         mutex_unlock(&dev_priv->drrs.mutex);
7205 }
7206
7207 /**
7208  * intel_edp_drrs_invalidate - Disable Idleness DRRS
7209  * @dev_priv: i915 device
7210  * @frontbuffer_bits: frontbuffer plane tracking bits
7211  *
 * This function gets called every time rendering on the given planes starts.
7213  * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
7214  *
7215  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
7216  */
7217 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
7218                                unsigned int frontbuffer_bits)
7219 {
7220         struct intel_dp *intel_dp;
7221         struct drm_crtc *crtc;
7222         enum pipe pipe;
7223
7224         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
7225                 return;
7226
7227         cancel_delayed_work(&dev_priv->drrs.work);
7228
7229         mutex_lock(&dev_priv->drrs.mutex);
7230
7231         intel_dp = dev_priv->drrs.dp;
7232         if (!intel_dp) {
7233                 mutex_unlock(&dev_priv->drrs.mutex);
7234                 return;
7235         }
7236
7237         crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
7238         pipe = to_intel_crtc(crtc)->pipe;
7239
7240         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
7241         dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
7242
7243         /* invalidate means busy screen hence upclock */
7244         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
7245                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
7246                                         drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
7247
7248         mutex_unlock(&dev_priv->drrs.mutex);
7249 }
7250
7251 /**
7252  * intel_edp_drrs_flush - Restart Idleness DRRS
7253  * @dev_priv: i915 device
7254  * @frontbuffer_bits: frontbuffer plane tracking bits
7255  *
7256  * This function gets called every time rendering on the given planes has
7257  * completed or flip on a crtc is completed. So DRRS should be upclocked
7258  * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
7259  * if no other planes are dirty.
7260  *
7261  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
7262  */
7263 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
7264                           unsigned int frontbuffer_bits)
7265 {
7266         struct intel_dp *intel_dp;
7267         struct drm_crtc *crtc;
7268         enum pipe pipe;
7269
7270         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
7271                 return;
7272
7273         cancel_delayed_work(&dev_priv->drrs.work);
7274
7275         mutex_lock(&dev_priv->drrs.mutex);
7276
7277         intel_dp = dev_priv->drrs.dp;
7278         if (!intel_dp) {
7279                 mutex_unlock(&dev_priv->drrs.mutex);
7280                 return;
7281         }
7282
7283         crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
7284         pipe = to_intel_crtc(crtc)->pipe;
7285
7286         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
7287         dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
7288
7289         /* flush means busy screen hence upclock */
7290         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
7291                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
7292                                         drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
7293
7294         /*
7295          * flush also means no more activity hence schedule downclock, if all
7296          * other fbs are quiescent too
7297          */
7298         if (!dev_priv->drrs.busy_frontbuffer_bits)
7299                 schedule_delayed_work(&dev_priv->drrs.work,
7300                                 msecs_to_jiffies(1000));
7301         mutex_unlock(&dev_priv->drrs.mutex);
7302 }
7303
7304 /**
7305  * DOC: Display Refresh Rate Switching (DRRS)
7306  *
7307  * Display Refresh Rate Switching (DRRS) is a power conservation feature
7308  * which enables swtching between low and high refresh rates,
7309  * dynamically, based on the usage scenario. This feature is applicable
7310  * for internal panels.
7311  *
7312  * Indication that the panel supports DRRS is given by the panel EDID, which
7313  * would list multiple refresh rates for one resolution.
7314  *
7315  * DRRS is of 2 types - static and seamless.
7316  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
7317  * (may appear as a blink on screen) and is used in dock-undock scenario.
7318  * Seamless DRRS involves changing RR without any visual effect to the user
7319  * and can be used during normal system usage. This is done by programming
7320  * certain registers.
7321  *
7322  * Support for static/seamless DRRS may be indicated in the VBT based on
7323  * inputs from the panel spec.
7324  *
7325  * DRRS saves power by switching to low RR based on usage scenarios.
7326  *
7327  * The implementation is based on frontbuffer tracking implementation.  When
7328  * there is a disturbance on the screen triggered by user activity or a periodic
7329  * system activity, DRRS is disabled (RR is changed to high RR).  When there is
7330  * no movement on screen, after a timeout of 1 second, a switch to low RR is
7331  * made.
7332  *
7333  * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
7334  * and intel_edp_drrs_flush() are called.
7335  *
7336  * DRRS can be further extended to support other internal panels and also
7337  * the scenario of video playback wherein RR is set based on the rate
7338  * requested by userspace.
7339  */
7340
7341 /**
7342  * intel_dp_drrs_init - Init basic DRRS work and mutex.
7343  * @connector: eDP connector
7344  * @fixed_mode: preferred mode of panel
7345  *
 * This function is called only once at driver load to initialize basic
7347  * DRRS stuff.
7348  *
7349  * Returns:
7350  * Downclock mode if panel supports it, else return NULL.
7351  * DRRS support is determined by the presence of downclock mode (apart
7352  * from VBT setting).
7353  */
7354 static struct drm_display_mode *
7355 intel_dp_drrs_init(struct intel_connector *connector,
7356                    struct drm_display_mode *fixed_mode)
7357 {
7358         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
7359         struct drm_display_mode *downclock_mode = NULL;
7360
7361         INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
7362         mutex_init(&dev_priv->drrs.mutex);
7363
7364         if (INTEL_GEN(dev_priv) <= 6) {
7365                 drm_dbg_kms(&dev_priv->drm,
7366                             "DRRS supported for Gen7 and above\n");
7367                 return NULL;
7368         }
7369
7370         if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
7371                 drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
7372                 return NULL;
7373         }
7374
7375         downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
7376         if (!downclock_mode) {
7377                 drm_dbg_kms(&dev_priv->drm,
7378                             "Downclock mode is not found. DRRS not supported\n");
7379                 return NULL;
7380         }
7381
7382         dev_priv->drrs.type = dev_priv->vbt.drrs_type;
7383
7384         dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
7385         drm_dbg_kms(&dev_priv->drm,
7386                     "seamless DRRS supported for eDP panel.\n");
7387         return downclock_mode;
7388 }
7389
/*
 * Set up the eDP side of a DP connector: panel power sequencer, cached
 * DPCD/EDID, fixed (and optional downclock) mode, and backlight.
 *
 * Returns true on success, or when @intel_dp is not eDP at all (not an
 * error); false when the panel cannot be initialized, e.g. because LVDS
 * owns the power sequencer or no link info could be read (ghost panel).
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
                                     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	enum pipe pipe = INVALID_PIPE;
	intel_wakeref_t wakeref;
	struct edid *edid;

	if (!intel_dp_is_edp(intel_dp))
		return true;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		drm_WARN_ON(dev,
			    !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		drm_info(&dev_priv->drm,
			 "LVDS was detected, not registering eDP\n");

		return false;
	}

	with_pps_lock(intel_dp, wakeref) {
		intel_dp_init_panel_power_timestamps(intel_dp);
		intel_dp_pps_init(intel_dp);
		intel_edp_panel_vdd_sanitize(intel_dp);
	}

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		drm_info(&dev_priv->drm,
			 "failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_connector_update_edid_property(connector, edid);
			intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
		} else {
			/* EDID present but yielded no modes - keep an error sentinel */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	/* May be a valid pointer or an ERR_PTR() - consumers must check */
	intel_connector->edid = edid;

	/* Prefer the EDID's fixed mode; it also gates DRRS support */
	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
	if (fixed_mode)
		downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);

	/* fallback to VBT if available for eDP */
	if (!fixed_mode)
		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		drm_dbg_kms(&dev_priv->drm,
			    "using pipe %c for initial backlight setup\n",
			    pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	if (fixed_mode) {
		drm_connector_set_panel_orientation_with_quirk(connector,
				dev_priv->vbt.orientation,
				fixed_mode->hdisplay, fixed_mode->vdisplay);
	}

	return true;

out_vdd_off:
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);

	return false;
}
7508
7509 static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
7510 {
7511         struct intel_connector *intel_connector;
7512         struct drm_connector *connector;
7513
7514         intel_connector = container_of(work, typeof(*intel_connector),
7515                                        modeset_retry_work);
7516         connector = &intel_connector->base;
7517         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
7518                       connector->name);
7519
7520         /* Grab the locks before changing connector property*/
7521         mutex_lock(&connector->dev->mode_config.mutex);
7522         /* Set connector link status to BAD and send a Uevent to notify
7523          * userspace to do a modeset.
7524          */
7525         drm_connector_set_link_status_property(connector,
7526                                                DRM_MODE_LINK_STATUS_BAD);
7527         mutex_unlock(&connector->dev->mode_config.mutex);
7528         /* Send Hotplug uevent so userspace can reprobe */
7529         drm_kms_helper_hotplug_event(connector->dev);
7530 }
7531
/*
 * Create and register the DRM connector for a DP/eDP digital port:
 * determine the connector type, initialize AUX, MST and (for eDP) the
 * panel, attach the encoder and connector properties.
 *
 * Returns true on success, false on failure (connector is cleaned up).
 */
bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
                        struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &dig_port->dp;
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	enum phy phy = intel_port_to_phy(dev_priv, port);
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (drm_WARN(dev, dig_port->max_lanes < 1,
		     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
		     dig_port->max_lanes, intel_encoder->base.base.id,
		     intel_encoder->base.name))
		return false;

	intel_dp_set_source_rates(intel_dp);

	intel_dp->reset_link_params = true;
	intel_dp->pps_pipe = INVALID_PIPE;
	intel_dp->active_pipe = INVALID_PIPE;

	/* Preserve the current hw state. */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_port_edp(dev_priv, port)) {
		/*
		 * Currently we don't support eDP on TypeC ports, although in
		 * theory it could work on TypeC legacy ports.
		 */
		drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
		type = DRM_MODE_CONNECTOR_eDP;
	} else {
		type = DRM_MODE_CONNECTOR_DisplayPort;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->active_pipe = vlv_active_pipe(intel_dp);

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
			      IS_CHERRYVIEW(dev_priv)) &&
			intel_dp_is_edp(intel_dp) &&
			port != PORT_B && port != PORT_C))
		return false;

	drm_dbg_kms(&dev_priv->drm,
		    "Adding %s connector on [ENCODER:%d:%s]\n",
		    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		    intel_encoder->base.base.id, intel_encoder->base.name);

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	if (!HAS_GMCH(dev_priv))
		connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	if (INTEL_GEN(dev_priv) >= 11)
		connector->ycbcr_420_allowed = true;

	/* DP has a dedicated HPD pin; no need for connector polling */
	intel_connector->polled = DRM_CONNECTOR_POLL_HPD;

	intel_dp_aux_init(intel_dp);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* init MST on ports that can support it */
	intel_dp_mst_encoder_init(dig_port,
				  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		/* eDP init failed: undo AUX and MST setup before bailing */
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_dp_init_hdcp(dig_port, intel_connector);
		if (ret)
			drm_dbg_kms(&dev_priv->drm,
				    "HDCP init failed, skipping.\n");
	}

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G45(dev_priv)) {
		u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
		intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
			       (temp & ~0xf) | 0xd);
	}

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}
7656
7657 bool intel_dp_init(struct drm_i915_private *dev_priv,
7658                    i915_reg_t output_reg,
7659                    enum port port)
7660 {
7661         struct intel_digital_port *dig_port;
7662         struct intel_encoder *intel_encoder;
7663         struct drm_encoder *encoder;
7664         struct intel_connector *intel_connector;
7665
7666         dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
7667         if (!dig_port)
7668                 return false;
7669
7670         intel_connector = intel_connector_alloc();
7671         if (!intel_connector)
7672                 goto err_connector_alloc;
7673
7674         intel_encoder = &dig_port->base;
7675         encoder = &intel_encoder->base;
7676
7677         mutex_init(&dig_port->hdcp_mutex);
7678
7679         if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
7680                              &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
7681                              "DP %c", port_name(port)))
7682                 goto err_encoder_init;
7683
7684         intel_encoder->hotplug = intel_dp_hotplug;
7685         intel_encoder->compute_config = intel_dp_compute_config;
7686         intel_encoder->get_hw_state = intel_dp_get_hw_state;
7687         intel_encoder->get_config = intel_dp_get_config;
7688         intel_encoder->update_pipe = intel_panel_update_backlight;
7689         intel_encoder->suspend = intel_dp_encoder_suspend;
7690         if (IS_CHERRYVIEW(dev_priv)) {
7691                 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
7692                 intel_encoder->pre_enable = chv_pre_enable_dp;
7693                 intel_encoder->enable = vlv_enable_dp;
7694                 intel_encoder->disable = vlv_disable_dp;
7695                 intel_encoder->post_disable = chv_post_disable_dp;
7696                 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
7697         } else if (IS_VALLEYVIEW(dev_priv)) {
7698                 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
7699                 intel_encoder->pre_enable = vlv_pre_enable_dp;
7700                 intel_encoder->enable = vlv_enable_dp;
7701                 intel_encoder->disable = vlv_disable_dp;
7702                 intel_encoder->post_disable = vlv_post_disable_dp;
7703         } else {
7704                 intel_encoder->pre_enable = g4x_pre_enable_dp;
7705                 intel_encoder->enable = g4x_enable_dp;
7706                 intel_encoder->disable = g4x_disable_dp;
7707                 intel_encoder->post_disable = g4x_post_disable_dp;
7708         }
7709
7710         if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
7711             (HAS_PCH_CPT(dev_priv) && port != PORT_A))
7712                 dig_port->dp.set_link_train = cpt_set_link_train;
7713         else
7714                 dig_port->dp.set_link_train = g4x_set_link_train;
7715
7716         if (IS_CHERRYVIEW(dev_priv))
7717                 dig_port->dp.set_signal_levels = chv_set_signal_levels;
7718         else if (IS_VALLEYVIEW(dev_priv))
7719                 dig_port->dp.set_signal_levels = vlv_set_signal_levels;
7720         else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
7721                 dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
7722         else if (IS_GEN(dev_priv, 6) && port == PORT_A)
7723                 dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
7724         else
7725                 dig_port->dp.set_signal_levels = g4x_set_signal_levels;
7726
7727         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
7728             (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
7729                 dig_port->dp.preemph_max = intel_dp_pre_empemph_max_3;
7730                 dig_port->dp.voltage_max = intel_dp_voltage_max_3;
7731         } else {
7732                 dig_port->dp.preemph_max = intel_dp_pre_empemph_max_2;
7733                 dig_port->dp.voltage_max = intel_dp_voltage_max_2;
7734         }
7735
7736         dig_port->dp.output_reg = output_reg;
7737         dig_port->max_lanes = 4;
7738         dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
7739         dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);
7740
7741         intel_encoder->type = INTEL_OUTPUT_DP;
7742         intel_encoder->power_domain = intel_port_to_power_domain(port);
7743         if (IS_CHERRYVIEW(dev_priv)) {
7744                 if (port == PORT_D)
7745                         intel_encoder->pipe_mask = BIT(PIPE_C);
7746                 else
7747                         intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
7748         } else {
7749                 intel_encoder->pipe_mask = ~0;
7750         }
7751         intel_encoder->cloneable = 0;
7752         intel_encoder->port = port;
7753         intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
7754
7755         dig_port->hpd_pulse = intel_dp_hpd_pulse;
7756
7757         if (HAS_GMCH(dev_priv)) {
7758                 if (IS_GM45(dev_priv))
7759                         dig_port->connected = gm45_digital_port_connected;
7760                 else
7761                         dig_port->connected = g4x_digital_port_connected;
7762         } else {
7763                 if (port == PORT_A)
7764                         dig_port->connected = ilk_digital_port_connected;
7765                 else
7766                         dig_port->connected = ibx_digital_port_connected;
7767         }
7768
7769         if (port != PORT_A)
7770                 intel_infoframe_init(dig_port);
7771
7772         dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
7773         if (!intel_dp_init_connector(dig_port, intel_connector))
7774                 goto err_init_connector;
7775
7776         return true;
7777
7778 err_init_connector:
7779         drm_encoder_cleanup(encoder);
7780 err_encoder_init:
7781         kfree(intel_connector);
7782 err_connector_alloc:
7783         kfree(dig_port);
7784         return false;
7785 }
7786
7787 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
7788 {
7789         struct intel_encoder *encoder;
7790
7791         for_each_intel_encoder(&dev_priv->drm, encoder) {
7792                 struct intel_dp *intel_dp;
7793
7794                 if (encoder->type != INTEL_OUTPUT_DDI)
7795                         continue;
7796
7797                 intel_dp = enc_to_intel_dp(encoder);
7798
7799                 if (!intel_dp->can_mst)
7800                         continue;
7801
7802                 if (intel_dp->is_mst)
7803                         drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
7804         }
7805 }
7806
7807 void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
7808 {
7809         struct intel_encoder *encoder;
7810
7811         for_each_intel_encoder(&dev_priv->drm, encoder) {
7812                 struct intel_dp *intel_dp;
7813                 int ret;
7814
7815                 if (encoder->type != INTEL_OUTPUT_DDI)
7816                         continue;
7817
7818                 intel_dp = enc_to_intel_dp(encoder);
7819
7820                 if (!intel_dp->can_mst)
7821                         continue;
7822
7823                 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
7824                                                      true);
7825                 if (ret) {
7826                         intel_dp->is_mst = false;
7827                         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
7828                                                         false);
7829                 }
7830         }
7831 }