drm/i915: Rework DFP max bpc handling
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / display / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/export.h>
29 #include <linux/i2c.h>
30 #include <linux/notifier.h>
31 #include <linux/reboot.h>
32 #include <linux/slab.h>
33 #include <linux/types.h>
34
35 #include <asm/byteorder.h>
36
37 #include <drm/drm_atomic_helper.h>
38 #include <drm/drm_crtc.h>
39 #include <drm/drm_dp_helper.h>
40 #include <drm/drm_edid.h>
41 #include <drm/drm_probe_helper.h>
42
43 #include "i915_debugfs.h"
44 #include "i915_drv.h"
45 #include "i915_trace.h"
46 #include "intel_atomic.h"
47 #include "intel_audio.h"
48 #include "intel_connector.h"
49 #include "intel_ddi.h"
50 #include "intel_display_types.h"
51 #include "intel_dp.h"
52 #include "intel_dp_link_training.h"
53 #include "intel_dp_mst.h"
54 #include "intel_dpio_phy.h"
55 #include "intel_fifo_underrun.h"
56 #include "intel_hdcp.h"
57 #include "intel_hdmi.h"
58 #include "intel_hotplug.h"
59 #include "intel_lspcon.h"
60 #include "intel_lvds.h"
61 #include "intel_panel.h"
62 #include "intel_psr.h"
63 #include "intel_sideband.h"
64 #include "intel_tc.h"
65 #include "intel_vdsc.h"
66
/* Size of the DPRX Event Status Indicator read, in bytes */
#define DP_DPRX_ESI_LEN 14

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = 1/(0.972261) */
#define DP_DSC_FEC_OVERHEAD_FACTOR		972261

/* Compliance test status bits  */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
82
/* Fixed DPLL divider settings for a given DP link clock (kHz). */
struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

/* DPLL dividers for RBR (1.62 GHz) and HBR (2.7 GHz) on g4x. */
static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* DPLL dividers for RBR/HBR on PCH (ILK-class) platforms. */
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* DPLL dividers for RBR/HBR on VLV. */
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4 that have  more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};

/* Constants for DP DSC configurations: valid compressed output bpps */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With Single pipe configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};
132
133 /**
134  * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
135  * @intel_dp: DP struct
136  *
137  * If a CPU or PCH DP output is attached to an eDP panel, this function
138  * will return true, and false otherwise.
139  */
140 bool intel_dp_is_edp(struct intel_dp *intel_dp)
141 {
142         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
143
144         return dig_port->base.type == INTEL_OUTPUT_EDP;
145 }
146
147 static void intel_dp_link_down(struct intel_encoder *encoder,
148                                const struct intel_crtc_state *old_crtc_state);
149 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
150 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
151 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
152                                            const struct intel_crtc_state *crtc_state);
153 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
154                                       enum pipe pipe);
155 static void intel_dp_unset_edid(struct intel_dp *intel_dp);
156
157 /* update sink rates from dpcd */
158 static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
159 {
160         static const int dp_rates[] = {
161                 162000, 270000, 540000, 810000
162         };
163         int i, max_rate;
164
165         if (drm_dp_has_quirk(&intel_dp->desc, 0,
166                              DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
167                 /* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
168                 static const int quirk_rates[] = { 162000, 270000, 324000 };
169
170                 memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
171                 intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);
172
173                 return;
174         }
175
176         max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
177
178         for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
179                 if (dp_rates[i] > max_rate)
180                         break;
181                 intel_dp->sink_rates[i] = dp_rates[i];
182         }
183
184         intel_dp->num_sink_rates = i;
185 }
186
/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/*
	 * rates[] is sorted ascending; scan down from the top and return
	 * the count up to the first entry that fits under max_rate.
	 */
	for (i = len - 1; i >= 0; i--) {
		if (rates[i] <= max_rate)
			return i + 1;
	}

	return 0;
}
200
/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	/* common_rates[] is sorted ascending; clamp it to rates <= max_rate. */
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}
208
/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	/* common_rates[] is sorted ascending, so the last entry is the max. */
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}
214
/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	int source_max = dig_port->max_lanes;	/* lanes wired on the source port */
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd); /* sink's DPCD limit */
	int fia_max = intel_tc_port_fia_max_lane_count(dig_port); /* Type-C FIA limit */

	return min3(source_max, sink_max, fia_max);
}
225
/*
 * Current max lane count, possibly reduced from the common max by
 * link training fallback (see intel_dp_get_link_train_fallback_values()).
 */
int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}
230
int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	int bits = pixel_clock * bpp;

	return DIV_ROUND_UP(bits, 8);
}
237
int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/*
	 * max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since, 8 bits of data
	 * is transmitted every LS_Clk per lane, there is no need to account for
	 * the channel encoding that is done in the PHY layer here.
	 */
	int total = max_lanes * max_link_clock;

	return total;
}
249
/*
 * Max dotclock (kHz) for this connector, taking a DP->VGA downstream
 * facing port's clock limit into account when one is present.
 */
static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	int max_dotclk = dev_priv->max_dotclk_freq;
	int ds_max_dotclk;

	/* Type of the first downstream facing port, from the DPCD. */
	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

	/* Only DP->VGA converters are clamped here. */
	if (type != DP_DS_PORT_TYPE_VGA)
		return max_dotclk;

	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
						    intel_dp->downstream_ports);

	/* 0 means the branch device didn't report a limit. */
	if (ds_max_dotclk != 0)
		max_dotclk = min(max_dotclk, ds_max_dotclk);

	return max_dotclk;
}
272
/* Max source link rate (kHz) on CNL, limited by SKU voltage and port. */
static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}
295
/* Max source link rate (kHz) on ICL-class platforms. */
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);

	/* Combo PHY external DP is capped at 5.4G, except on EHL and eDP. */
	if (intel_phy_is_combo(dev_priv, phy) &&
	    !IS_ELKHARTLAKE(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}
309
/*
 * Initialize intel_dp->source_rates/num_source_rates from the per-platform
 * rate table, clamped by the platform/SKU max rate (if any) and the VBT
 * max rate (if any). Intended to run exactly once per encoder.
 */
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate;

	/* This should only be done once */
	drm_WARN_ON(&dev_priv->drm,
		    intel_dp->source_rates || intel_dp->num_source_rates);

	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	/* Combine the SKU limit (if any) with the VBT limit (if any). */
	vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}
373
374 static int intersect_rates(const int *source_rates, int source_len,
375                            const int *sink_rates, int sink_len,
376                            int *common_rates)
377 {
378         int i = 0, j = 0, k = 0;
379
380         while (i < source_len && j < sink_len) {
381                 if (source_rates[i] == sink_rates[j]) {
382                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
383                                 return k;
384                         common_rates[k] = source_rates[i];
385                         ++k;
386                         ++i;
387                         ++j;
388                 } else if (source_rates[i] < sink_rates[j]) {
389                         ++i;
390                 } else {
391                         ++j;
392                 }
393         }
394         return k;
395 }
396
/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int idx = 0;

	while (idx < len) {
		if (rates[idx] == rate)
			return idx;
		idx++;
	}

	return -1;
}
408
/*
 * Compute the source/sink common rate list. Requires both source and
 * sink rates to have been set already; falls back to RBR (162000 kHz)
 * if the intersection is unexpectedly empty.
 */
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	drm_WARN_ON(&i915->drm,
		    !intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}
428
/* Check that (link_rate, lane_count) is non-zero and within the current max. */
static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}
447
448 static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
449                                                      int link_rate,
450                                                      u8 lane_count)
451 {
452         const struct drm_display_mode *fixed_mode =
453                 intel_dp->attached_connector->panel.fixed_mode;
454         int mode_rate, max_rate;
455
456         mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
457         max_rate = intel_dp_max_data_rate(link_rate, lane_count);
458         if (mode_rate > max_rate)
459                 return false;
460
461         return true;
462 }
463
/*
 * Compute reduced link parameters after a link training failure:
 * first step down to the next lower common rate at the same lane
 * count, then halve the lane count at the max common rate. For eDP,
 * a fallback that could no longer drive the panel's fixed mode is
 * rejected and the same parameters are retried instead.
 *
 * Returns 0 when a (possibly unchanged) retry is possible, -1 when
 * out of options or on MST links.
 */
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int index;

	/*
	 * TODO: Enable fallback on MST links once MST link compute can handle
	 * the fallback params.
	 */
	if (intel_dp->is_mst) {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			drm_dbg_kms(&i915->drm,
				    "Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		drm_err(&i915->drm, "Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}
511
/* Scale mode_clock up by the DSC FEC overhead factor (1/0.972261). */
u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
	return div_u64(mul_u32_u32(mode_clock, 1000000U),
		       DP_DSC_FEC_OVERHEAD_FACTOR);
}
517
/* Small joiner RAM size in bits: 7680 bytes on gen11+, 6144 bytes before. */
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	int bytes = INTEL_GEN(i915) >= 11 ? 7680 : 6144;

	return bytes * 8;
}
526
/*
 * Return the max compressed output bpp (U6.4 fixed point) that both the
 * link bandwidth and the small joiner RAM can sustain, snapped down to
 * the nearest valid DSC bpp. Returns 0 if even the smallest valid bpp
 * doesn't fit.
 */
static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
				       u32 link_clock, u32 lane_count,
				       u32 mode_clock, u32 mode_hdisplay)
{
	u32 bits_per_pixel, max_bpp_small_joiner_ram;
	int i;

	/*
	 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
	 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
	 * for SST -> TimeSlotsPerMTP is 1,
	 * for MST -> TimeSlotsPerMTP has to be calculated
	 */
	bits_per_pixel = (link_clock * lane_count * 8) /
			 intel_dp_mode_to_fec_clock(mode_clock);
	drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);

	/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
	max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
		mode_hdisplay;
	drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
		    max_bpp_small_joiner_ram);

	/*
	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
	 * check, output bpp from small joiner RAM check)
	 */
	bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);

	/* Error out if the max bpp is less than smallest allowed valid bpp */
	if (bits_per_pixel < valid_dsc_bpp[0]) {
		drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
			    bits_per_pixel, valid_dsc_bpp[0]);
		return 0;
	}

	/* Find the nearest match in the array of known BPPs from VESA */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
		if (bits_per_pixel < valid_dsc_bpp[i + 1])
			break;
	}
	bits_per_pixel = valid_dsc_bpp[i];

	/*
	 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
	 * fractional part is 0
	 */
	return bits_per_pixel << 4;
}
576
/*
 * Pick the smallest valid DSC slice count that satisfies the encoder
 * throughput limit and the sink's max slice width/count. Returns 0 if
 * no valid slice count fits.
 */
static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
				       int mode_clock, int mode_hdisplay)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 min_slice_count, i;
	int max_slice_width;

	/* Pick the per-slice throughput limit based on the pixel rate. */
	if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_0);
	else
		min_slice_count = DIV_ROUND_UP(mode_clock,
					       DP_DSC_MAX_ENC_THROUGHPUT_1);

	max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
	if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
		drm_dbg_kms(&i915->drm,
			    "Unsupported slice width %d by DP DSC Sink device\n",
			    max_slice_width);
		return 0;
	}
	/* Also take into account max slice width */
	min_slice_count = min_t(u8, min_slice_count,
				DIV_ROUND_UP(mode_hdisplay,
					     max_slice_width));

	/* Find the closest match to the valid slice count values */
	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		if (valid_dsc_slicecount[i] >
		    drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
						    false))
			break;
		if (min_slice_count  <= valid_dsc_slicecount[i])
			return valid_dsc_slicecount[i];
	}

	drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
		    min_slice_count);
	return 0;
}
617
/* Reject hdisplay values known to misbehave with DP on pre-DDI hardware. */
static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
				  int hdisplay)
{
	/*
	 * Older platforms don't like hdisplay==4096 with DP.
	 *
	 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
	 * and frame counter increment), but we don't get vblank interrupts,
	 * and the pipe underruns immediately. The link also doesn't seem
	 * to get trained properly.
	 *
	 * On CHV the vblank interrupts don't seem to disappear but
	 * otherwise the symptoms are similar.
	 *
	 * TODO: confirm the behaviour on HSW+
	 */
	return hdisplay == 4096 && !HAS_DDI(dev_priv);
}
636
/*
 * Validate a mode against the panel fixed mode (eDP), link bandwidth,
 * downstream dotclock limit, and platform restrictions. DSC (when the
 * platform and sink support it) may rescue a mode whose uncompressed
 * rate exceeds the link bandwidth.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	/* eDP panels can't exceed their fixed mode dimensions. */
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	/* 18 bpp (6 bpc RGB) is the lowest bpp the mode might use */
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
		return MODE_H_ILLEGAL;

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(dev_priv,
							    max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay);
		}
	}

	/* An over-bandwidth mode is still OK if DSC can compress it. */
	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return intel_mode_valid_max_plane_size(dev_priv, mode);
}
713
714 u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
715 {
716         int i;
717         u32 v = 0;
718
719         if (src_bytes > 4)
720                 src_bytes = 4;
721         for (i = 0; i < src_bytes; i++)
722                 v |= ((u32)src[i]) << ((3 - i) * 8);
723         return v;
724 }
725
726 static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
727 {
728         int i;
729         if (dst_bytes > 4)
730                 dst_bytes = 4;
731         for (i = 0; i < dst_bytes; i++)
732                 dst[i] = src >> ((3-i) * 8);
733 }
734
735 static void
736 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
737 static void
738 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
739                                               bool force_disable_vdd);
740 static void
741 intel_dp_pps_init(struct intel_dp *intel_dp);
742
/*
 * Take the pps mutex with the relevant AUX power domain awake; returns
 * the wakeref that must be handed back via pps_unlock().
 */
static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}
760
/*
 * Drop the pps mutex and the power reference taken by pps_lock().
 * Always returns 0 (terminates the with_pps_lock() loop).
 */
static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}
772
/*
 * Run the loop body once with the pps mutex held and the AUX power
 * domain awake; pps_unlock() returns 0, which ends the loop.
 */
#define with_pps_lock(dp, wf) \
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
775
/*
 * Make the power sequencer on intel_dp->pps_pipe lock onto this port by
 * briefly enabling and disabling the DP port with a minimal configuration.
 * On VLV/CHV even the VDD force bit does not work until this has been done.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	/* Refuse to kick while the port is actively enabled. */
	if (drm_WARN(&dev_priv->drm,
		     intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
		     "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
		     pipe_name(pipe), dig_port->base.base.base.id,
		     dig_port->base.base.name))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
	/* Minimal port config: single lane, lowest drive levels, TP1. */
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable temporarily it if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			drm_err(&dev_priv->drm,
				"Failed to force on pll for pipe %c!\n",
				pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	/* Undo the temporary PLL enable / CHV powergate override. */
	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}
853
854 static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
855 {
856         struct intel_encoder *encoder;
857         unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
858
859         /*
860          * We don't have power sequencer currently.
861          * Pick one that's not used by other ports.
862          */
863         for_each_intel_dp(&dev_priv->drm, encoder) {
864                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
865
866                 if (encoder->type == INTEL_OUTPUT_EDP) {
867                         drm_WARN_ON(&dev_priv->drm,
868                                     intel_dp->active_pipe != INVALID_PIPE &&
869                                     intel_dp->active_pipe !=
870                                     intel_dp->pps_pipe);
871
872                         if (intel_dp->pps_pipe != INVALID_PIPE)
873                                 pipes &= ~(1 << intel_dp->pps_pipe);
874                 } else {
875                         drm_WARN_ON(&dev_priv->drm,
876                                     intel_dp->pps_pipe != INVALID_PIPE);
877
878                         if (intel_dp->active_pipe != INVALID_PIPE)
879                                 pipes &= ~(1 << intel_dp->active_pipe);
880                 }
881         }
882
883         if (pipes == 0)
884                 return INVALID_PIPE;
885
886         return ffs(pipes) - 1;
887 }
888
/*
 * Return the pipe whose power sequencer drives this eDP port. If none is
 * assigned yet, pick a free one (stealing it from any other user), program
 * it, and kick it so it locks onto the port. Ordering of the steal ->
 * assign -> init -> kick steps matters and must not be changed.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
		    intel_dp->active_pipe != intel_dp->pps_pipe);

	/* Fast path: a PPS pipe was already assigned. */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe),
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
937
938 static int
939 bxt_power_sequencer_idx(struct intel_dp *intel_dp)
940 {
941         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
942         int backlight_controller = dev_priv->vbt.backlight.controller;
943
944         lockdep_assert_held(&dev_priv->pps_mutex);
945
946         /* We should never land here with regular DP ports */
947         drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
948
949         if (!intel_dp->pps_reset)
950                 return backlight_controller;
951
952         intel_dp->pps_reset = false;
953
954         /*
955          * Only the HW needs to be reprogrammed, the SW state is fixed and
956          * has been setup during connector init.
957          */
958         intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
959
960         return backlight_controller;
961 }
962
/* Predicate used to filter candidate pipes when probing for the initial PPS. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);
965
966 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
967                                enum pipe pipe)
968 {
969         return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
970 }
971
972 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
973                                 enum pipe pipe)
974 {
975         return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
976 }
977
/* Accepts any pipe; used as the last-resort pipe_check predicate. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
983
984 static enum pipe
985 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
986                      enum port port,
987                      vlv_pipe_check pipe_check)
988 {
989         enum pipe pipe;
990
991         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
992                 u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
993                         PANEL_PORT_SELECT_MASK;
994
995                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
996                         continue;
997
998                 if (!pipe_check(dev_priv, pipe))
999                         continue;
1000
1001                 return pipe;
1002         }
1003
1004         return INVALID_PIPE;
1005 }
1006
/*
 * Figure out which pipe's power sequencer the BIOS left driving this eDP
 * port, preferring a pipe with panel power on, then one with VDD forced
 * on, then any pipe whose port select matches. If none is found the
 * choice is deferred to vlv_power_sequencer_pipe().
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum port port = dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		drm_dbg_kms(&dev_priv->drm,
			    "no initial power sequencer for [ENCODER:%d:%s]\n",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}
1047
/*
 * Invalidate the cached PPS state of every eDP port after the power
 * domain the PPS lives in has been turned off. Only meaningful on
 * VLV/CHV/GEN9-LP where PPS state is lost or relocatable.
 */
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (drm_WARN_ON(&dev_priv->drm,
			!(IS_VALLEYVIEW(dev_priv) ||
			  IS_CHERRYVIEW(dev_priv) ||
			  IS_GEN9_LP(dev_priv))))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN_ON(&dev_priv->drm,
			    intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		/*
		 * GEN9-LP keeps its fixed PPS index and only needs the HW
		 * reprogrammed; VLV/CHV must re-pick a PPS pipe on next use.
		 */
		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}
1083
/* MMIO registers of one panel power sequencer instance. */
struct pps_registers {
	i915_reg_t pp_ctrl;	/* panel power control */
	i915_reg_t pp_stat;	/* panel power status */
	i915_reg_t pp_on;	/* power-on delays + port select */
	i915_reg_t pp_off;	/* power-off delays */
	i915_reg_t pp_div;	/* divisor; INVALID_MMIO_REG where it moved into PP_CONTROL */
};
1091
1092 static void intel_pps_get_registers(struct intel_dp *intel_dp,
1093                                     struct pps_registers *regs)
1094 {
1095         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1096         int pps_idx = 0;
1097
1098         memset(regs, 0, sizeof(*regs));
1099
1100         if (IS_GEN9_LP(dev_priv))
1101                 pps_idx = bxt_power_sequencer_idx(intel_dp);
1102         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1103                 pps_idx = vlv_power_sequencer_pipe(intel_dp);
1104
1105         regs->pp_ctrl = PP_CONTROL(pps_idx);
1106         regs->pp_stat = PP_STATUS(pps_idx);
1107         regs->pp_on = PP_ON_DELAYS(pps_idx);
1108         regs->pp_off = PP_OFF_DELAYS(pps_idx);
1109
1110         /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
1111         if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
1112                 regs->pp_div = INVALID_MMIO_REG;
1113         else
1114                 regs->pp_div = PP_DIVISOR(pps_idx);
1115 }
1116
1117 static i915_reg_t
1118 _pp_ctrl_reg(struct intel_dp *intel_dp)
1119 {
1120         struct pps_registers regs;
1121
1122         intel_pps_get_registers(intel_dp, &regs);
1123
1124         return regs.pp_ctrl;
1125 }
1126
1127 static i915_reg_t
1128 _pp_stat_reg(struct intel_dp *intel_dp)
1129 {
1130         struct pps_registers regs;
1131
1132         intel_pps_get_registers(intel_dp, &regs);
1133
1134         return regs.pp_stat;
1135 }
1136
/*
 * Reboot notifier handler: shuts down panel power so the T12 power cycle
 * delay is honoured across the reboot. Only applicable when the panel's
 * power state is not otherwise tracked across reboot.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/* Only act for eDP panels and only on an actual restart. */
	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;
			u32 pp_div;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg  = PP_DIVISOR(pipe);
			pp_div = intel_de_read(dev_priv, pp_div_reg);
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* 0x1F write to PP_DIV_REG sets max cycle delay */
			intel_de_write(dev_priv, pp_div_reg, pp_div | 0x1F);
			intel_de_write(dev_priv, pp_ctrl_reg,
				       PANEL_UNLOCK_REGS);
			/* Wait out the panel power cycle delay before reboot proceeds. */
			msleep(intel_dp->panel_power_cycle_delay);
		}
	}

	return 0;
}
1171
1172 static bool edp_have_panel_power(struct intel_dp *intel_dp)
1173 {
1174         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1175
1176         lockdep_assert_held(&dev_priv->pps_mutex);
1177
1178         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1179             intel_dp->pps_pipe == INVALID_PIPE)
1180                 return false;
1181
1182         return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
1183 }
1184
1185 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
1186 {
1187         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1188
1189         lockdep_assert_held(&dev_priv->pps_mutex);
1190
1191         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1192             intel_dp->pps_pipe == INVALID_PIPE)
1193                 return false;
1194
1195         return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
1196 }
1197
1198 static void
1199 intel_dp_check_edp(struct intel_dp *intel_dp)
1200 {
1201         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1202
1203         if (!intel_dp_is_edp(intel_dp))
1204                 return;
1205
1206         if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
1207                 drm_WARN(&dev_priv->drm, 1,
1208                          "eDP powered off while attempting aux channel communication.\n");
1209                 drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
1210                             intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
1211                             intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
1212         }
1213 }
1214
/*
 * Sleep on gmbus_wait_queue until the AUX SEND_BUSY bit clears, with a
 * 10ms timeout. Returns the last AUX_CH_CTL value read, whether or not
 * the transfer completed.
 */
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	bool done;

	/* C re-reads the control register (into status) on every evaluation. */
#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(timeout_ms));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		drm_err(&i915->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);
#undef C

	return status;
}
1239
1240 static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1241 {
1242         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1243
1244         if (index)
1245                 return 0;
1246
1247         /*
1248          * The clock divider is based off the hrawclk, and would like to run at
1249          * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
1250          */
1251         return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
1252 }
1253
1254 static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1255 {
1256         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1257         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1258         u32 freq;
1259
1260         if (index)
1261                 return 0;
1262
1263         /*
1264          * The clock divider is based off the cdclk or PCH rawclk, and would
1265          * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
1266          * divide by 2000 and use that
1267          */
1268         if (dig_port->aux_ch == AUX_CH_A)
1269                 freq = dev_priv->cdclk.hw.cdclk;
1270         else
1271                 freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
1272         return DIV_ROUND_CLOSEST(freq, 2000);
1273 }
1274
1275 static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1276 {
1277         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1278         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1279
1280         if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
1281                 /* Workaround for non-ULT HSW */
1282                 switch (index) {
1283                 case 0: return 63;
1284                 case 1: return 72;
1285                 default: return 0;
1286                 }
1287         }
1288
1289         return ilk_get_aux_clock_divider(intel_dp, index);
1290 }
1291
1292 static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1293 {
1294         /*
1295          * SKL doesn't need us to program the AUX clock divider (Hardware will
1296          * derive the clock from CDCLK automatically). We still implement the
1297          * get_aux_clock_divider vfunc to plug-in into the existing code.
1298          */
1299         return index ? 0 : 1;
1300 }
1301
1302 static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
1303                                 int send_bytes,
1304                                 u32 aux_clock_divider)
1305 {
1306         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1307         struct drm_i915_private *dev_priv =
1308                         to_i915(dig_port->base.base.dev);
1309         u32 precharge, timeout;
1310
1311         if (IS_GEN(dev_priv, 6))
1312                 precharge = 3;
1313         else
1314                 precharge = 5;
1315
1316         if (IS_BROADWELL(dev_priv))
1317                 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
1318         else
1319                 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
1320
1321         return DP_AUX_CH_CTL_SEND_BUSY |
1322                DP_AUX_CH_CTL_DONE |
1323                DP_AUX_CH_CTL_INTERRUPT |
1324                DP_AUX_CH_CTL_TIME_OUT_ERROR |
1325                timeout |
1326                DP_AUX_CH_CTL_RECEIVE_ERROR |
1327                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1328                (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
1329                (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
1330 }
1331
1332 static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
1333                                 int send_bytes,
1334                                 u32 unused)
1335 {
1336         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1337         struct drm_i915_private *i915 =
1338                         to_i915(dig_port->base.base.dev);
1339         enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
1340         u32 ret;
1341
1342         ret = DP_AUX_CH_CTL_SEND_BUSY |
1343               DP_AUX_CH_CTL_DONE |
1344               DP_AUX_CH_CTL_INTERRUPT |
1345               DP_AUX_CH_CTL_TIME_OUT_ERROR |
1346               DP_AUX_CH_CTL_TIME_OUT_MAX |
1347               DP_AUX_CH_CTL_RECEIVE_ERROR |
1348               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1349               DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
1350               DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
1351
1352         if (intel_phy_is_tc(i915, phy) &&
1353             dig_port->tc_mode == TC_PORT_TBT_ALT)
1354                 ret |= DP_AUX_CH_CTL_TBT_IO;
1355
1356         return ret;
1357 }
1358
/*
 * Perform one raw AUX channel transfer: send @send_bytes (max 20) from
 * @send, receive up to @recv_size bytes into @recv. Returns the number
 * of bytes received, or a negative error code (-EBUSY, -E2BIG, -EIO,
 * -ETIMEDOUT). Takes care of TC port locking, AUX power, pps_mutex,
 * VDD force and PM QoS around the transfer.
 */
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	/* Type-C ports need the TC lock held around AUX access. */
	if (is_tc_port)
		intel_tc_port_lock(dig_port);

	aux_domain = intel_aux_power_domain(dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	cpu_latency_qos_update_request(&i915->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		/* Only warn once per distinct busy status to avoid log spam. */
		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(&i915->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Retry the transfer at each clock divider the platform provides. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(&i915->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	/* Release everything in the reverse order it was acquired. */
	cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(dig_port);

	return ret;
}
1551
/* AUX message header: 3 request/address bytes, plus 1 length byte. */
#define BARE_ADDRESS_SIZE       3
#define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
1554
1555 static void
1556 intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
1557                     const struct drm_dp_aux_msg *msg)
1558 {
1559         txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
1560         txbuf[1] = (msg->address >> 8) & 0xff;
1561         txbuf[2] = msg->address & 0xff;
1562         txbuf[3] = msg->size - 1;
1563 }
1564
1565 static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
1566 {
1567         /*
1568          * If we're trying to send the HDCP Aksv, we need to set a the Aksv
1569          * select bit to inform the hardware to send the Aksv after our header
1570          * since we can't access that data from software.
1571          */
1572         if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
1573             msg->address == DP_AUX_HDCP_AKSV)
1574                 return DP_AUX_CH_CTL_AUX_AKSV_SELECT;
1575
1576         return 0;
1577 }
1578
/*
 * drm_dp_aux .transfer() hook: translate a drm_dp_aux_msg into a raw AUX
 * channel transaction via intel_dp_aux_xfer() and decode the sink's reply.
 *
 * Returns the payload size on success (bytes accepted for writes, bytes
 * copied into msg->buffer for reads) or a negative errno.  The caller is
 * expected to check msg->reply (NACK/DEFER) before trusting the data.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	/* 20 bytes: 4-byte header plus the 16-byte max AUX payload */
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	u32 flags = intel_dp_aux_xfer_flags(msg);
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* A zero-size write sends only the bare 3-byte address. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (drm_WARN_ON(&i915->drm, txsize > 20))
			return -E2BIG;

		/* buffer and size must be both set or both unset */
		drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			/* The reply code is in the high nibble of byte 0. */
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		/* One extra byte for the reply code prepended to the data. */
		rxsize = msg->size + 1;

		if (drm_WARN_ON(&i915->drm, rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1651
1652
1653 static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
1654 {
1655         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1656         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1657         enum aux_ch aux_ch = dig_port->aux_ch;
1658
1659         switch (aux_ch) {
1660         case AUX_CH_B:
1661         case AUX_CH_C:
1662         case AUX_CH_D:
1663                 return DP_AUX_CH_CTL(aux_ch);
1664         default:
1665                 MISSING_CASE(aux_ch);
1666                 return DP_AUX_CH_CTL(AUX_CH_B);
1667         }
1668 }
1669
1670 static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
1671 {
1672         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1673         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1674         enum aux_ch aux_ch = dig_port->aux_ch;
1675
1676         switch (aux_ch) {
1677         case AUX_CH_B:
1678         case AUX_CH_C:
1679         case AUX_CH_D:
1680                 return DP_AUX_CH_DATA(aux_ch, index);
1681         default:
1682                 MISSING_CASE(aux_ch);
1683                 return DP_AUX_CH_DATA(AUX_CH_B, index);
1684         }
1685 }
1686
1687 static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
1688 {
1689         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1690         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1691         enum aux_ch aux_ch = dig_port->aux_ch;
1692
1693         switch (aux_ch) {
1694         case AUX_CH_A:
1695                 return DP_AUX_CH_CTL(aux_ch);
1696         case AUX_CH_B:
1697         case AUX_CH_C:
1698         case AUX_CH_D:
1699                 return PCH_DP_AUX_CH_CTL(aux_ch);
1700         default:
1701                 MISSING_CASE(aux_ch);
1702                 return DP_AUX_CH_CTL(AUX_CH_A);
1703         }
1704 }
1705
1706 static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
1707 {
1708         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1709         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1710         enum aux_ch aux_ch = dig_port->aux_ch;
1711
1712         switch (aux_ch) {
1713         case AUX_CH_A:
1714                 return DP_AUX_CH_DATA(aux_ch, index);
1715         case AUX_CH_B:
1716         case AUX_CH_C:
1717         case AUX_CH_D:
1718                 return PCH_DP_AUX_CH_DATA(aux_ch, index);
1719         default:
1720                 MISSING_CASE(aux_ch);
1721                 return DP_AUX_CH_DATA(AUX_CH_A, index);
1722         }
1723 }
1724
1725 static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
1726 {
1727         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1728         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1729         enum aux_ch aux_ch = dig_port->aux_ch;
1730
1731         switch (aux_ch) {
1732         case AUX_CH_A:
1733         case AUX_CH_B:
1734         case AUX_CH_C:
1735         case AUX_CH_D:
1736         case AUX_CH_E:
1737         case AUX_CH_F:
1738         case AUX_CH_G:
1739                 return DP_AUX_CH_CTL(aux_ch);
1740         default:
1741                 MISSING_CASE(aux_ch);
1742                 return DP_AUX_CH_CTL(AUX_CH_A);
1743         }
1744 }
1745
1746 static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
1747 {
1748         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1749         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1750         enum aux_ch aux_ch = dig_port->aux_ch;
1751
1752         switch (aux_ch) {
1753         case AUX_CH_A:
1754         case AUX_CH_B:
1755         case AUX_CH_C:
1756         case AUX_CH_D:
1757         case AUX_CH_E:
1758         case AUX_CH_F:
1759         case AUX_CH_G:
1760                 return DP_AUX_CH_DATA(aux_ch, index);
1761         default:
1762                 MISSING_CASE(aux_ch);
1763                 return DP_AUX_CH_DATA(AUX_CH_A, index);
1764         }
1765 }
1766
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	/* Free the AUX channel name allocated in intel_dp_aux_init(). */
	kfree(intel_dp->aux.name);
}
1772
/*
 * One-time AUX channel setup for this DP port: select the platform
 * specific register-lookup, clock-divider and send-ctl vfuncs, then
 * initialize the drm_dp_aux core state and hook up our transfer routine.
 *
 * The gen >= 9 test comes first in each ladder, so it takes precedence
 * over the HAS_PCH_SPLIT() / HSW / BDW paths below it.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;

	/* Which registers back this port's AUX channel. */
	if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	/* How to derive the AUX clock divider from the platform clocks. */
	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	/* How to build the AUX_CH_CTL value for a transfer. */
	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/port %c",
				       aux_ch_name(dig_port->aux_ch),
				       port_name(encoder->port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;
}
1813
1814 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1815 {
1816         int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1817
1818         return max_rate >= 540000;
1819 }
1820
1821 bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
1822 {
1823         int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1824
1825         return max_rate >= 810000;
1826 }
1827
1828 static void
1829 intel_dp_set_clock(struct intel_encoder *encoder,
1830                    struct intel_crtc_state *pipe_config)
1831 {
1832         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1833         const struct dp_link_dpll *divisor = NULL;
1834         int i, count = 0;
1835
1836         if (IS_G4X(dev_priv)) {
1837                 divisor = g4x_dpll;
1838                 count = ARRAY_SIZE(g4x_dpll);
1839         } else if (HAS_PCH_SPLIT(dev_priv)) {
1840                 divisor = pch_dpll;
1841                 count = ARRAY_SIZE(pch_dpll);
1842         } else if (IS_CHERRYVIEW(dev_priv)) {
1843                 divisor = chv_dpll;
1844                 count = ARRAY_SIZE(chv_dpll);
1845         } else if (IS_VALLEYVIEW(dev_priv)) {
1846                 divisor = vlv_dpll;
1847                 count = ARRAY_SIZE(vlv_dpll);
1848         }
1849
1850         if (divisor && count) {
1851                 for (i = 0; i < count; i++) {
1852                         if (pipe_config->port_clock == divisor[i].clock) {
1853                                 pipe_config->dpll = divisor[i].dpll;
1854                                 pipe_config->clock_set = true;
1855                                 break;
1856                         }
1857                 }
1858         }
1859 }
1860
/*
 * Format @nelem integers from @array into @str as a comma separated list,
 * e.g. "162000, 270000, 540000".  Output that does not fit into @len bytes
 * (including the terminating NUL) is silently truncated; @str is always
 * NUL terminated.  Assumes @len > 0.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		/*
		 * Stop on output error (r < 0) or truncation (r >= len).
		 * The old code compared the signed r directly against the
		 * size_t len, relying on the implicit signed->unsigned
		 * conversion of a negative r; make both checks explicit.
		 */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1876
/*
 * Dump the source, sink and common link rate lists to the KMS debug log.
 * Bails out early so the string formatting work is skipped entirely when
 * KMS debugging is disabled.
 */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	char str[128]; /* FIXME: too big for stack? */

	if (!drm_debug_enabled(DRM_UT_KMS))
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	drm_dbg_kms(&i915->drm, "source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
}
1897
1898 int
1899 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1900 {
1901         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1902         int len;
1903
1904         len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
1905         if (drm_WARN_ON(&i915->drm, len <= 0))
1906                 return 162000;
1907
1908         return intel_dp->common_rates[len - 1];
1909 }
1910
1911 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1912 {
1913         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1914         int i = intel_dp_rate_index(intel_dp->sink_rates,
1915                                     intel_dp->num_sink_rates, rate);
1916
1917         if (drm_WARN_ON(&i915->drm, i < 0))
1918                 i = 0;
1919
1920         return i;
1921 }
1922
1923 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1924                            u8 *link_bw, u8 *rate_select)
1925 {
1926         /* eDP 1.4 rate select method. */
1927         if (intel_dp->use_rate_select) {
1928                 *link_bw = 0;
1929                 *rate_select =
1930                         intel_dp_rate_select(intel_dp, port_clock);
1931         } else {
1932                 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1933                 *rate_select = 0;
1934         }
1935 }
1936
1937 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
1938                                          const struct intel_crtc_state *pipe_config)
1939 {
1940         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1941
1942         /* On TGL, FEC is supported on all Pipes */
1943         if (INTEL_GEN(dev_priv) >= 12)
1944                 return true;
1945
1946         if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
1947                 return true;
1948
1949         return false;
1950 }
1951
1952 static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
1953                                   const struct intel_crtc_state *pipe_config)
1954 {
1955         return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
1956                 drm_dp_sink_supports_fec(intel_dp->fec_capable);
1957 }
1958
1959 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
1960                                   const struct intel_crtc_state *crtc_state)
1961 {
1962         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
1963
1964         if (!intel_dp_is_edp(intel_dp) && !crtc_state->fec_enable)
1965                 return false;
1966
1967         return intel_dsc_source_support(encoder, crtc_state) &&
1968                 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
1969 }
1970
1971 static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
1972                                 struct intel_crtc_state *pipe_config)
1973 {
1974         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1975         struct intel_connector *intel_connector = intel_dp->attached_connector;
1976         int bpp;
1977
1978         bpp = pipe_config->pipe_bpp;
1979
1980         if (intel_dp->dfp.max_bpc)
1981                 bpp = min(bpp, 3 * intel_dp->dfp.max_bpc);
1982
1983         if (intel_dp_is_edp(intel_dp)) {
1984                 /* Get bpp from vbt only for panels that dont have bpp in edid */
1985                 if (intel_connector->base.display_info.bpc == 0 &&
1986                     dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
1987                         drm_dbg_kms(&dev_priv->drm,
1988                                     "clamping bpp for eDP panel to BIOS-provided %i\n",
1989                                     dev_priv->vbt.edp.bpp);
1990                         bpp = dev_priv->vbt.edp.bpp;
1991                 }
1992         }
1993
1994         return bpp;
1995 }
1996
/* Adjust link config limits based on compliance test requests. */
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		/* Pin both limits so link computation uses exactly this bpp. */
		limits->min_bpp = limits->max_bpp = bpp;
		pipe_config->dither_force_disable = bpp == 6 * 3;

		drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			/* Only pin the clock if the requested rate is known. */
			if (index >= 0)
				limits->min_clock = limits->max_clock = index;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}
2034
2035 static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp)
2036 {
2037         /*
2038          * bpp value was assumed to RGB format. And YCbCr 4:2:0 output
2039          * format of the number of bytes per pixel will be half the number
2040          * of bytes of RGB pixel.
2041          */
2042         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
2043                 bpp /= 2;
2044
2045         return bpp;
2046 }
2047
/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	/*
	 * The loop nesting encodes the policy above: prefer the highest
	 * bpp first, then the lowest link clock, then the fewest lanes.
	 * bpp steps down by 2 bits per component (2 * 3) each iteration.
	 */
	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		int output_bpp = intel_dp_output_bpp(pipe_config, bpp);

		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   output_bpp);

		for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					/* First fit wins given the loop order. */
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	/* No bpp/clock/lane combination within limits can carry this mode. */
	return -EINVAL;
}
2085
2086 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
2087 {
2088         int i, num_bpc;
2089         u8 dsc_bpc[3] = {0};
2090
2091         num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
2092                                                        dsc_bpc);
2093         for (i = 0; i < num_bpc; i++) {
2094                 if (dsc_max_bpc >= dsc_bpc[i])
2095                         return dsc_bpc[i] * 3;
2096         }
2097
2098         return 0;
2099 }
2100
2101 #define DSC_SUPPORTED_VERSION_MIN               1
2102
/*
 * Fill in the DSC configuration for this CRTC from the sink's DSC DPCD
 * capabilities (version, color format, line buffer depth, block
 * prediction) and compute the rate control parameters.
 *
 * Returns 0 on success or a negative errno.
 */
static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	u8 line_buf_depth;
	int ret;

	ret = intel_dsc_compute_params(encoder, crtc_state);
	if (ret)
		return ret;

	/*
	 * Slice Height of 8 works for all currently available panels. So start
	 * with that if pic_height is an integral multiple of 8. Eventually add
	 * logic to try multiple slice heights.
	 */
	if (vdsc_cfg->pic_height % 8 == 0)
		vdsc_cfg->slice_height = 8;
	else if (vdsc_cfg->pic_height % 4 == 0)
		vdsc_cfg->slice_height = 4;
	else
		vdsc_cfg->slice_height = 2;

	/* DSC version supported by the sink, from the DP_DSC_REV DPCD field. */
	vdsc_cfg->dsc_version_major =
		(intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
	/* Never claim a minor version beyond what we support ourselves. */
	vdsc_cfg->dsc_version_minor =
		min(DSC_SUPPORTED_VERSION_MIN,
		    (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		     DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);

	vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
		DP_DSC_RGB;

	line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
	if (!line_buf_depth) {
		drm_dbg_kms(&i915->drm,
			    "DSC Sink Line Buffer Depth invalid\n");
		return -EINVAL;
	}

	/* DSC 1.2 encodes the max line buffer depth with a special value. */
	if (vdsc_cfg->dsc_version_minor == 2)
		vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
	else
		vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;

	vdsc_cfg->block_pred_enable =
		intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
		DP_DSC_BLK_PREDICTION_IS_SUPPORTED;

	return drm_dsc_compute_rc_parameters(vdsc_cfg);
}
2159
/*
 * Try to enable DSC for this mode: pick the input bpp, compressed bpp and
 * slice count from source and sink capabilities, then compute the full
 * DSC parameter set.
 *
 * Returns 0 on success with pipe_config->dsc filled in, or a negative
 * errno when DSC cannot be used for this configuration.
 */
static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       struct link_config_limits *limits)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	u8 dsc_max_bpc;
	int pipe_bpp;
	int ret;

	/* FEC is required for DSC on DP, but not on eDP. */
	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
		intel_dp_supports_fec(intel_dp, pipe_config);

	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
		return -EINVAL;

	/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
	if (INTEL_GEN(dev_priv) >= 12)
		dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
	else
		dsc_max_bpc = min_t(u8, 10,
				    conn_state->max_requested_bpc);

	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);

	/* Min Input BPC for ICL+ is 8 */
	if (pipe_bpp < 8 * 3) {
		drm_dbg_kms(&dev_priv->drm,
			    "No DSC support for less than 8bpc\n");
		return -EINVAL;
	}

	/*
	 * For now enable DSC for max bpp, max link rate, max lane count.
	 * Optimize this later for the minimum possible link rate/lane count
	 * with DSC enabled for the requested mode.
	 */
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		/* eDP: compressed bpp and slice count come from the DPCD. */
		pipe_config->dsc.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
	} else {
		u16 dsc_max_output_bpp;
		u8 dsc_dp_slice_count;

		/* DP: derive them from link bandwidth and mode timings. */
		dsc_max_output_bpp =
			intel_dp_dsc_get_output_bpp(dev_priv,
						    pipe_config->port_clock,
						    pipe_config->lane_count,
						    adjusted_mode->crtc_clock,
						    adjusted_mode->crtc_hdisplay);
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay);
		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
			drm_dbg_kms(&dev_priv->drm,
				    "Compressed BPP/Slice Count not supported\n");
			return -EINVAL;
		}
		pipe_config->dsc.compressed_bpp = min_t(u16,
							dsc_max_output_bpp >> 4,
							pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count = dsc_dp_slice_count;
	}
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
		if (pipe_config->dsc.slice_count > 1) {
			pipe_config->dsc.dsc_split = true;
		} else {
			drm_dbg_kms(&dev_priv->drm,
				    "Cannot split stream to use 2 VDSC instances\n");
			return -EINVAL;
		}
	}

	ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
	if (ret < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Cannot compute valid DSC parameters for Input Bpp = %d "
			    "Compressed BPP = %d\n",
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);
		return ret;
	}

	pipe_config->dsc.compression_enable = true;
	drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
		    "Compressed Bpp = %d Slice Count = %d\n",
		    pipe_config->pipe_bpp,
		    pipe_config->dsc.compressed_bpp,
		    pipe_config->dsc.slice_count);

	return 0;
}
2269
2270 int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state)
2271 {
2272         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
2273                 return 6 * 3;
2274         else
2275                 return 8 * 3;
2276 }
2277
/*
 * Compute the DP link configuration (lane count, link clock, pipe bpp,
 * optional DSC) for this mode, within source/sink limits and any
 * compliance test overrides.  Falls back to DSC when the uncompressed
 * mode does not fit the available link bandwidth.
 *
 * Returns 0 on success or a negative errno.
 */
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct link_config_limits limits;
	int common_len;
	int ret;

	common_len = intel_dp_common_len_rate_limit(intel_dp,
						    intel_dp->max_link_rate);

	/* No common link rates between source and sink */
	drm_WARN_ON(encoder->base.dev, common_len <= 0);

	/* Clock limits are indices into intel_dp->common_rates[]. */
	limits.min_clock = 0;
	limits.max_clock = common_len - 1;

	limits.min_lane_count = 1;
	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits.min_bpp = intel_dp_min_bpp(pipe_config);
	limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);

	if (intel_dp_is_edp(intel_dp)) {
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertizes being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		limits.min_lane_count = limits.max_lane_count;
		limits.min_clock = limits.max_clock;
	}

	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);

	drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
		    "max rate %d max bpp %d pixel clock %iKHz\n",
		    limits.max_lane_count,
		    intel_dp->common_rates[limits.max_clock],
		    limits.max_bpp, adjusted_mode->crtc_clock);

	/*
	 * Optimize for slow and wide. This is the place to add alternative
	 * optimization policy.
	 */
	ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);

	/* enable compression if the mode doesn't fit available BW */
	drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
	if (ret || intel_dp->force_dsc_en) {
		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits);
		if (ret < 0)
			return ret;
	}

	if (pipe_config->dsc.compression_enable) {
		drm_dbg_kms(&i915->drm,
			    "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->dsc.compressed_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	} else {
		drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
			    pipe_config->lane_count, pipe_config->port_clock,
			    pipe_config->pipe_bpp);

		drm_dbg_kms(&i915->drm,
			    "DP link rate required %i available %i\n",
			    intel_dp_link_required(adjusted_mode->crtc_clock,
						   pipe_config->pipe_bpp),
			    intel_dp_max_data_rate(pipe_config->port_clock,
						   pipe_config->lane_count));
	}
	return 0;
}
2368
2369 static int
2370 intel_dp_ycbcr420_config(struct intel_dp *intel_dp,
2371                          struct intel_crtc_state *crtc_state,
2372                          const struct drm_connector_state *conn_state)
2373 {
2374         struct drm_connector *connector = conn_state->connector;
2375         const struct drm_display_info *info = &connector->display_info;
2376         const struct drm_display_mode *adjusted_mode =
2377                 &crtc_state->hw.adjusted_mode;
2378
2379         if (!drm_mode_is_420_only(info, adjusted_mode) ||
2380             !intel_dp_get_colorimetry_status(intel_dp) ||
2381             !connector->ycbcr_420_allowed)
2382                 return 0;
2383
2384         crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
2385
2386         return intel_pch_panel_fitting(crtc_state, conn_state);
2387 }
2388
2389 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
2390                                   const struct drm_connector_state *conn_state)
2391 {
2392         const struct intel_digital_connector_state *intel_conn_state =
2393                 to_intel_digital_connector_state(conn_state);
2394         const struct drm_display_mode *adjusted_mode =
2395                 &crtc_state->hw.adjusted_mode;
2396
2397         /*
2398          * Our YCbCr output is always limited range.
2399          * crtc_state->limited_color_range only applies to RGB,
2400          * and it must never be set for YCbCr or we risk setting
2401          * some conflicting bits in PIPECONF which will mess up
2402          * the colors on the monitor.
2403          */
2404         if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
2405                 return false;
2406
2407         if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
2408                 /*
2409                  * See:
2410                  * CEA-861-E - 5.1 Default Encoding Parameters
2411                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
2412                  */
2413                 return crtc_state->pipe_bpp != 18 &&
2414                         drm_default_rgb_quant_range(adjusted_mode) ==
2415                         HDMI_QUANTIZATION_RANGE_LIMITED;
2416         } else {
2417                 return intel_conn_state->broadcast_rgb ==
2418                         INTEL_BROADCAST_RGB_LIMITED;
2419         }
2420 }
2421
2422 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
2423                                     enum port port)
2424 {
2425         if (IS_G4X(dev_priv))
2426                 return false;
2427         if (INTEL_GEN(dev_priv) < 12 && port == PORT_A)
2428                 return false;
2429
2430         return true;
2431 }
2432
/*
 * Fill in the VSC SDP payload (pixel encoding, colorimetry, bpc, dynamic
 * range) from the crtc and connector state, using the rev 0x5 / length
 * 0x13 header so the Pixel Encoding/Colorimetry Format fields are valid.
 */
static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
					     const struct drm_connector_state *conn_state,
					     struct drm_dp_vsc_sdp *vsc)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
	 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
	 * Colorimetry Format indication.
	 */
	vsc->revision = 0x5;
	vsc->length = 0x13;

	/* Pixel encoding from our output format: DP 1.4a spec, Table 2-120 */
	switch (crtc_state->output_format) {
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		vsc->pixelformat = DP_PIXELFORMAT_YUV444;
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		vsc->pixelformat = DP_PIXELFORMAT_YUV420;
		break;
	case INTEL_OUTPUT_FORMAT_RGB:
	default:
		vsc->pixelformat = DP_PIXELFORMAT_RGB;
	}

	/* Map the connector's Colorspace property to DP colorimetry. */
	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_709:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
		break;
	case DRM_MODE_COLORIMETRY_SYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_OPYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
		break;
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
		vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
		break;
	default:
		/*
		 * RGB->YCBCR color conversion uses the BT.709
		 * color space.
		 */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
			vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		else
			vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
		break;
	}

	/* bits per color component, not per pixel */
	vsc->bpc = crtc_state->pipe_bpp / 3;

	/* only RGB pixelformat supports 6 bpc */
	drm_WARN_ON(&dev_priv->drm,
		    vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);

	/* all YCbCr are always limited range */
	vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
	vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
}
2512
2513 static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
2514                                      struct intel_crtc_state *crtc_state,
2515                                      const struct drm_connector_state *conn_state)
2516 {
2517         struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;
2518
2519         /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
2520         if (crtc_state->has_psr)
2521                 return;
2522
2523         if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
2524                 return;
2525
2526         crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
2527         vsc->sdp_type = DP_SDP_VSC;
2528         intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
2529                                          &crtc_state->infoframes.vsc);
2530 }
2531
/*
 * Build the VSC SDP used by the PSR code. The header revision/length
 * depend on the PSR generation and whether colorimetry reporting is
 * active; with PSR2 + colorimetry the full colorimetry payload is
 * filled in as well.
 */
void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state,
				  struct drm_dp_vsc_sdp *vsc)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	vsc->sdp_type = DP_SDP_VSC;

	if (dev_priv->psr.psr2_enabled) {
		if (dev_priv->psr.colorimetry_support &&
		    intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
			/* [PSR2, +Colorimetry] */
			intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
							 vsc);
		} else {
			/*
			 * [PSR2, -Colorimetry]
			 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
			 * 3D stereo + PSR/PSR2 + Y-coordinate.
			 */
			vsc->revision = 0x4;
			vsc->length = 0xe;
		}
	} else {
		/*
		 * [PSR1]
		 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
		 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
		 * higher).
		 */
		vsc->revision = 0x2;
		vsc->length = 0x8;
	}
}
2567
2568 static void
2569 intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
2570                                             struct intel_crtc_state *crtc_state,
2571                                             const struct drm_connector_state *conn_state)
2572 {
2573         int ret;
2574         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2575         struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;
2576
2577         if (!conn_state->hdr_output_metadata)
2578                 return;
2579
2580         ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);
2581
2582         if (ret) {
2583                 drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
2584                 return;
2585         }
2586
2587         crtc_state->infoframes.enable |=
2588                 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
2589 }
2590
/*
 * Enable seamless DRRS when the panel provides a downclock mode and PSR
 * is not in use, and precompute the M2/N2 link values for the lower
 * refresh rate.
 */
static void
intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
			     struct intel_crtc_state *pipe_config,
			     int output_bpp, bool constant_n)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/*
	 * DRRS and PSR can't be enable together, so giving preference to PSR
	 * as it allows more power-savings by complete shutting down display,
	 * so to guarantee this, intel_dp_drrs_compute_config() must be called
	 * after intel_psr_compute_config().
	 */
	if (pipe_config->has_psr)
		return;

	if (!intel_connector->panel.downclock_mode ||
	    dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
		return;

	pipe_config->has_drrs = true;
	/* M2/N2 pair for the downclocked refresh rate */
	intel_link_compute_m_n(output_bpp, pipe_config->lane_count,
			       intel_connector->panel.downclock_mode->clock,
			       pipe_config->port_clock, &pipe_config->dp_m2_n2,
			       constant_n, pipe_config->fec_enable);
}
2618
/*
 * Compute the full DP crtc state for a modeset: output format, audio,
 * panel fitting (eDP), link parameters (clock/lanes/bpp, optionally DSC),
 * color range, data M/N values, PSR/DRRS, and the VSC/HDR infoframe SDPs.
 * Returns 0 on success or a negative error code if no valid configuration
 * exists for the requested mode.
 */
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
	enum port port = encoder->port;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
					   DP_DPCD_QUIRK_CONSTANT_N);
	int ret = 0, output_bpp;

	/* Pre-DDI PCH platforms drive every port except A through the PCH. */
	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;

	/* An active LSPCON does its own YCbCr 4:2:0 handling. */
	if (lspcon->active)
		lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
	else
		ret = intel_dp_ycbcr420_config(intel_dp, pipe_config,
					       conn_state);
	if (ret)
		return ret;

	/* Resolve audio: hardware capability, then the force_audio property. */
	if (!intel_dp_port_has_audio(dev_priv, port))
		pipe_config->has_audio = false;
	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		pipe_config->has_audio = intel_dp->has_audio;
	else
		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;

	/* eDP: always use the panel's fixed mode, scaling via panel fitter. */
	if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (HAS_GMCH(dev_priv))
			ret = intel_gmch_panel_fitting(pipe_config, conn_state);
		else
			ret = intel_pch_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	/* Reject mode flags we cannot support on DP. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (HAS_GMCH(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
		return -EINVAL;

	/* Pick a link rate / lane count / bpp (and DSC) that fit the mode. */
	ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
	if (ret < 0)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	/* The link carries the compressed bpp when DSC is enabled. */
	if (pipe_config->dsc.compression_enable)
		output_bpp = pipe_config->dsc.compressed_bpp;
	else
		output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp);

	intel_link_compute_m_n(output_bpp,
			       pipe_config->lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n,
			       constant_n, pipe_config->fec_enable);

	if (!HAS_DDI(dev_priv))
		intel_dp_set_clock(encoder, pipe_config);

	/* PSR before DRRS: the DRRS code checks pipe_config->has_psr. */
	intel_psr_compute_config(intel_dp, pipe_config);
	intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
				     constant_n);
	intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
	intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);

	return 0;
}
2711
2712 void intel_dp_set_link_params(struct intel_dp *intel_dp,
2713                               int link_rate, u8 lane_count,
2714                               bool link_mst)
2715 {
2716         intel_dp->link_trained = false;
2717         intel_dp->link_rate = link_rate;
2718         intel_dp->lane_count = lane_count;
2719         intel_dp->link_mst = link_mst;
2720 }
2721
/*
 * Precompute the DP port register value (intel_dp->DP) for the upcoming
 * enable, and on CPT PCH also program the enhanced framing bit in
 * TRANS_DP_CTL. The port register itself is not written here.
 */
static void intel_dp_prepare(struct intel_encoder *encoder,
			     const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;

	intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
				 pipe_config->lane_count,
				 intel_crtc_has_type(pipe_config,
						     INTEL_OUTPUT_DP_MST));

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ilk_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
		/* IVB CPU port A: sync polarity and pipe select live here. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		/* CPT PCH: enhanced framing is in TRANS_DP_CTL instead. */
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH / CPU / VLV / CHV register layout. */
		if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev_priv))
			intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
		else
			intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
	}
}
2805
/*
 * PP_STATUS mask/value pairs for wait_panel_status(): panel fully on with
 * the sequencer idle, panel fully off, and power-cycle delay complete.
 * The literal 0 columns keep the unused bit positions visually aligned.
 */
#define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

static void intel_pps_verify_state(struct intel_dp *intel_dp);
2816
/*
 * Poll the panel power sequencer's status register until
 * (PP_STATUS & mask) == value, logging an error on timeout.
 * Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_verify_state(intel_dp);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	drm_dbg_kms(&dev_priv->drm,
		    "mask %08x value %08x status %08x control %08x\n",
		    mask, value,
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));

	/* Allow up to 5 seconds for the sequencer to reach the state. */
	if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
				       mask, value, 5000))
		drm_err(&dev_priv->drm,
			"Panel status timeout: status %08x control %08x\n",
			intel_de_read(dev_priv, pp_stat_reg),
			intel_de_read(dev_priv, pp_ctrl_reg));

	drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
}
2846
2847 static void wait_panel_on(struct intel_dp *intel_dp)
2848 {
2849         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2850
2851         drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
2852         wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
2853 }
2854
2855 static void wait_panel_off(struct intel_dp *intel_dp)
2856 {
2857         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2858
2859         drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
2860         wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
2861 }
2862
2863 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
2864 {
2865         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2866         ktime_t panel_power_on_time;
2867         s64 panel_power_off_duration;
2868
2869         drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
2870
2871         /* take the difference of currrent time and panel power off time
2872          * and then make panel wait for t11_t12 if needed. */
2873         panel_power_on_time = ktime_get_boottime();
2874         panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
2875
2876         /* When we disable the VDD override bit last we have to do the manual
2877          * wait. */
2878         if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
2879                 wait_remaining_ms_from_jiffies(jiffies,
2880                                        intel_dp->panel_power_cycle_delay - panel_power_off_duration);
2881
2882         wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
2883 }
2884
/* Wait out backlight_on_delay, measured from the last panel power on. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
2890
/* Wait out backlight_off_delay, measured from the last backlight off. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
2896
2897 /* Read the current pp_control value, unlocking the register if it
2898  * is locked
2899  */
2900
2901 static  u32 ilk_get_pp_control(struct intel_dp *intel_dp)
2902 {
2903         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2904         u32 control;
2905
2906         lockdep_assert_held(&dev_priv->pps_mutex);
2907
2908         control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
2909         if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
2910                         (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
2911                 control &= ~PANEL_UNLOCK_MASK;
2912                 control |= PANEL_UNLOCK_REGS;
2913         }
2914         return control;
2915 }
2916
2917 /*
2918  * Must be paired with edp_panel_vdd_off().
2919  * Must hold pps_mutex around the whole on/off sequence.
2920  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2921  */
2922 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
2923 {
2924         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2925         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2926         u32 pp;
2927         i915_reg_t pp_stat_reg, pp_ctrl_reg;
2928         bool need_to_disable = !intel_dp->want_panel_vdd;
2929
2930         lockdep_assert_held(&dev_priv->pps_mutex);
2931
2932         if (!intel_dp_is_edp(intel_dp))
2933                 return false;
2934
2935         cancel_delayed_work(&intel_dp->panel_vdd_work);
2936         intel_dp->want_panel_vdd = true;
2937
2938         if (edp_have_panel_vdd(intel_dp))
2939                 return need_to_disable;
2940
2941         intel_display_power_get(dev_priv,
2942                                 intel_aux_power_domain(dig_port));
2943
2944         drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
2945                     dig_port->base.base.base.id,
2946                     dig_port->base.base.name);
2947
2948         if (!edp_have_panel_power(intel_dp))
2949                 wait_panel_power_cycle(intel_dp);
2950
2951         pp = ilk_get_pp_control(intel_dp);
2952         pp |= EDP_FORCE_VDD;
2953
2954         pp_stat_reg = _pp_stat_reg(intel_dp);
2955         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2956
2957         intel_de_write(dev_priv, pp_ctrl_reg, pp);
2958         intel_de_posting_read(dev_priv, pp_ctrl_reg);
2959         drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2960                     intel_de_read(dev_priv, pp_stat_reg),
2961                     intel_de_read(dev_priv, pp_ctrl_reg));
2962         /*
2963          * If the panel wasn't on, delay before accessing aux channel
2964          */
2965         if (!edp_have_panel_power(intel_dp)) {
2966                 drm_dbg_kms(&dev_priv->drm,
2967                             "[ENCODER:%d:%s] panel power wasn't enabled\n",
2968                             dig_port->base.base.base.id,
2969                             dig_port->base.base.name);
2970                 msleep(intel_dp->panel_power_up_delay);
2971         }
2972
2973         return need_to_disable;
2974 }
2975
2976 /*
2977  * Must be paired with intel_edp_panel_vdd_off() or
2978  * intel_edp_panel_off().
2979  * Nested calls to these functions are not allowed since
2980  * we drop the lock. Caller must use some higher level
2981  * locking to prevent nested calls from other threads.
2982  */
2983 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2984 {
2985         intel_wakeref_t wakeref;
2986         bool vdd;
2987
2988         if (!intel_dp_is_edp(intel_dp))
2989                 return;
2990
2991         vdd = false;
2992         with_pps_lock(intel_dp, wakeref)
2993                 vdd = edp_panel_vdd_on(intel_dp);
2994         I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
2995                         dp_to_dig_port(intel_dp)->base.base.base.id,
2996                         dp_to_dig_port(intel_dp)->base.base.name);
2997 }
2998
/*
 * Immediately turn the VDD override off and release the AUX power
 * domain. Only legal once no one wants VDD anymore (warns on
 * intel_dp->want_panel_vdd). Caller must hold pps_mutex.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port =
		dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	pp = ilk_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));

	/* Panel is now fully off; record when, for the power cycle delay. */
	if ((pp & PANEL_POWER_ON) == 0)
		intel_dp->panel_power_off_time = ktime_get_boottime();

	intel_display_power_put_unchecked(dev_priv,
					  intel_aux_power_domain(dig_port));
}
3038
/* Delayed work: drop the VDD override if no one has re-requested it. */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp =
		container_of(to_delayed_work(__work),
			     struct intel_dp, panel_vdd_work);
	intel_wakeref_t wakeref;

	with_pps_lock(intel_dp, wakeref) {
		if (!intel_dp->want_panel_vdd)
			edp_panel_vdd_off_sync(intel_dp);
	}
}
3051
3052 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
3053 {
3054         unsigned long delay;
3055
3056         /*
3057          * Queue the timer to fire a long time from now (relative to the power
3058          * down delay) to keep the panel power up across a sequence of
3059          * operations.
3060          */
3061         delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
3062         schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
3063 }
3064
3065 /*
3066  * Must be paired with edp_panel_vdd_on().
3067  * Must hold pps_mutex around the whole on/off sequence.
3068  * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
3069  */
3070 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
3071 {
3072         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3073
3074         lockdep_assert_held(&dev_priv->pps_mutex);
3075
3076         if (!intel_dp_is_edp(intel_dp))
3077                 return;
3078
3079         I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
3080                         dp_to_dig_port(intel_dp)->base.base.base.id,
3081                         dp_to_dig_port(intel_dp)->base.base.name);
3082
3083         intel_dp->want_panel_vdd = false;
3084
3085         if (sync)
3086                 edp_panel_vdd_off_sync(intel_dp);
3087         else
3088                 edp_panel_vdd_schedule_off(intel_dp);
3089 }
3090
/*
 * Turn on eDP panel power via the PPS control register.
 * Caller must hold pps_mutex. No-op for non-eDP ports, and warns if
 * panel power is already on.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        u32 pp;
        i915_reg_t pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!intel_dp_is_edp(intel_dp))
                return;

        drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
                    dp_to_dig_port(intel_dp)->base.base.base.id,
                    dp_to_dig_port(intel_dp)->base.base.name);

        if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
                     "[ENCODER:%d:%s] panel power already on\n",
                     dp_to_dig_port(intel_dp)->base.base.base.id,
                     dp_to_dig_port(intel_dp)->base.base.name))
                return;

        /* Honour the mandatory power-cycle delay since the last power off. */
        wait_panel_power_cycle(intel_dp);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp = ilk_get_pp_control(intel_dp);
        if (IS_GEN(dev_priv, 5)) {
                /* ILK workaround: disable reset around power sequence */
                pp &= ~PANEL_POWER_RESET;
                intel_de_write(dev_priv, pp_ctrl_reg, pp);
                intel_de_posting_read(dev_priv, pp_ctrl_reg);
        }

        pp |= PANEL_POWER_ON;
        if (!IS_GEN(dev_priv, 5))
                pp |= PANEL_POWER_RESET;

        intel_de_write(dev_priv, pp_ctrl_reg, pp);
        /* Posting read flushes the write before we wait on the sequencer. */
        intel_de_posting_read(dev_priv, pp_ctrl_reg);

        wait_panel_on(intel_dp);
        /* Record when power came up for later backlight-on delay accounting. */
        intel_dp->last_power_on = jiffies;

        if (IS_GEN(dev_priv, 5)) {
                pp |= PANEL_POWER_RESET; /* restore panel reset bit */
                intel_de_write(dev_priv, pp_ctrl_reg, pp);
                intel_de_posting_read(dev_priv, pp_ctrl_reg);
        }
}
3139
3140 void intel_edp_panel_on(struct intel_dp *intel_dp)
3141 {
3142         intel_wakeref_t wakeref;
3143
3144         if (!intel_dp_is_edp(intel_dp))
3145                 return;
3146
3147         with_pps_lock(intel_dp, wakeref)
3148                 edp_panel_on(intel_dp);
3149 }
3150
3151
/*
 * Turn off eDP panel power. Caller must hold pps_mutex and must have
 * VDD forced on (we warn otherwise); the forced-VDD power reference is
 * released here.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        u32 pp;
        i915_reg_t pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!intel_dp_is_edp(intel_dp))
                return;

        drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
                    dig_port->base.base.base.id, dig_port->base.base.name);

        drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd,
                 "Need [ENCODER:%d:%s] VDD to turn off panel\n",
                 dig_port->base.base.base.id, dig_port->base.base.name);

        pp = ilk_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
        pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
                EDP_BLC_ENABLE);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        intel_dp->want_panel_vdd = false;

        intel_de_write(dev_priv, pp_ctrl_reg, pp);
        intel_de_posting_read(dev_priv, pp_ctrl_reg);

        wait_panel_off(intel_dp);
        /* Timestamp used to enforce the panel power cycle delay on re-enable. */
        intel_dp->panel_power_off_time = ktime_get_boottime();

        /* We got a reference when we enabled the VDD. */
        intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
}
3190
3191 void intel_edp_panel_off(struct intel_dp *intel_dp)
3192 {
3193         intel_wakeref_t wakeref;
3194
3195         if (!intel_dp_is_edp(intel_dp))
3196                 return;
3197
3198         with_pps_lock(intel_dp, wakeref)
3199                 edp_panel_off(intel_dp);
3200 }
3201
3202 /* Enable backlight in the panel power control. */
3203 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
3204 {
3205         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3206         intel_wakeref_t wakeref;
3207
3208         /*
3209          * If we enable the backlight right away following a panel power
3210          * on, we may see slight flicker as the panel syncs with the eDP
3211          * link.  So delay a bit to make sure the image is solid before
3212          * allowing it to appear.
3213          */
3214         wait_backlight_on(intel_dp);
3215
3216         with_pps_lock(intel_dp, wakeref) {
3217                 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
3218                 u32 pp;
3219
3220                 pp = ilk_get_pp_control(intel_dp);
3221                 pp |= EDP_BLC_ENABLE;
3222
3223                 intel_de_write(dev_priv, pp_ctrl_reg, pp);
3224                 intel_de_posting_read(dev_priv, pp_ctrl_reg);
3225         }
3226 }
3227
3228 /* Enable backlight PWM and backlight PP control. */
3229 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
3230                             const struct drm_connector_state *conn_state)
3231 {
3232         struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
3233         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3234
3235         if (!intel_dp_is_edp(intel_dp))
3236                 return;
3237
3238         drm_dbg_kms(&i915->drm, "\n");
3239
3240         intel_panel_enable_backlight(crtc_state, conn_state);
3241         _intel_edp_backlight_on(intel_dp);
3242 }
3243
3244 /* Disable backlight in the panel power control. */
3245 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
3246 {
3247         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3248         intel_wakeref_t wakeref;
3249
3250         if (!intel_dp_is_edp(intel_dp))
3251                 return;
3252
3253         with_pps_lock(intel_dp, wakeref) {
3254                 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
3255                 u32 pp;
3256
3257                 pp = ilk_get_pp_control(intel_dp);
3258                 pp &= ~EDP_BLC_ENABLE;
3259
3260                 intel_de_write(dev_priv, pp_ctrl_reg, pp);
3261                 intel_de_posting_read(dev_priv, pp_ctrl_reg);
3262         }
3263
3264         intel_dp->last_backlight_off = jiffies;
3265         edp_wait_backlight_off(intel_dp);
3266 }
3267
3268 /* Disable backlight PP control and backlight PWM. */
3269 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
3270 {
3271         struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
3272         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3273
3274         if (!intel_dp_is_edp(intel_dp))
3275                 return;
3276
3277         drm_dbg_kms(&i915->drm, "\n");
3278
3279         _intel_edp_backlight_off(intel_dp);
3280         intel_panel_disable_backlight(old_conn_state);
3281 }
3282
3283 /*
3284  * Hook for controlling the panel power control backlight through the bl_power
3285  * sysfs attribute. Take care to handle multiple calls.
3286  */
3287 static void intel_edp_backlight_power(struct intel_connector *connector,
3288                                       bool enable)
3289 {
3290         struct drm_i915_private *i915 = to_i915(connector->base.dev);
3291         struct intel_dp *intel_dp = intel_attached_dp(connector);
3292         intel_wakeref_t wakeref;
3293         bool is_enabled;
3294
3295         is_enabled = false;
3296         with_pps_lock(intel_dp, wakeref)
3297                 is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
3298         if (is_enabled == enable)
3299                 return;
3300
3301         drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
3302                     enable ? "enable" : "disable");
3303
3304         if (enable)
3305                 _intel_edp_backlight_on(intel_dp);
3306         else
3307                 _intel_edp_backlight_off(intel_dp);
3308 }
3309
3310 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
3311 {
3312         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3313         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
3314         bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;
3315
3316         I915_STATE_WARN(cur_state != state,
3317                         "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
3318                         dig_port->base.base.base.id, dig_port->base.base.name,
3319                         onoff(state), onoff(cur_state));
3320 }
3321 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
3322
3323 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
3324 {
3325         bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;
3326
3327         I915_STATE_WARN(cur_state != state,
3328                         "eDP PLL state assertion failure (expected %s, current %s)\n",
3329                         onoff(state), onoff(cur_state));
3330 }
3331 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
3332 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
3333
/*
 * Enable the eDP PLL on ILK-era hardware: program the PLL frequency for
 * the chosen link rate, then set the enable bit. Must be called with the
 * pipe disabled, the DP port disabled and the PLL currently off.
 */
static void ilk_edp_pll_on(struct intel_dp *intel_dp,
                           const struct intel_crtc_state *pipe_config)
{
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
        assert_dp_port_disabled(intel_dp);
        assert_edp_pll_disabled(dev_priv);

        drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n",
                    pipe_config->port_clock);

        /* Select the PLL frequency matching the link rate (1.62 vs 2.7 GHz). */
        intel_dp->DP &= ~DP_PLL_FREQ_MASK;

        if (pipe_config->port_clock == 162000)
                intel_dp->DP |= DP_PLL_FREQ_162MHZ;
        else
                intel_dp->DP |= DP_PLL_FREQ_270MHZ;

        intel_de_write(dev_priv, DP_A, intel_dp->DP);
        intel_de_posting_read(dev_priv, DP_A);
        udelay(500);

        /*
         * [DevILK] Work around required when enabling DP PLL
         * while a pipe is enabled going to FDI:
         * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
         * 2. Program DP PLL enable
         */
        if (IS_GEN(dev_priv, 5))
                intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);

        intel_dp->DP |= DP_PLL_ENABLE;

        intel_de_write(dev_priv, DP_A, intel_dp->DP);
        intel_de_posting_read(dev_priv, DP_A);
        /* Let the PLL settle before further port programming. */
        udelay(200);
}
3373
/*
 * Disable the eDP PLL. Must be called with the pipe disabled, the DP
 * port disabled and the PLL currently on.
 */
static void ilk_edp_pll_off(struct intel_dp *intel_dp,
                            const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
        assert_dp_port_disabled(intel_dp);
        assert_edp_pll_enabled(dev_priv);

        drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n");

        intel_dp->DP &= ~DP_PLL_ENABLE;

        intel_de_write(dev_priv, DP_A, intel_dp->DP);
        intel_de_posting_read(dev_priv, DP_A);
        /* Give the PLL time to actually spin down. */
        udelay(200);
}
3392
3393 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
3394 {
3395         /*
3396          * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
3397          * be capable of signalling downstream hpd with a long pulse.
3398          * Whether or not that means D3 is safe to use is not clear,
3399          * but let's assume so until proven otherwise.
3400          *
3401          * FIXME should really check all downstream ports...
3402          */
3403         return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
3404                 drm_dp_is_branch(intel_dp->dpcd) &&
3405                 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
3406 }
3407
3408 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
3409                                            const struct intel_crtc_state *crtc_state,
3410                                            bool enable)
3411 {
3412         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3413         int ret;
3414
3415         if (!crtc_state->dsc.compression_enable)
3416                 return;
3417
3418         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
3419                                  enable ? DP_DECOMPRESSION_EN : 0);
3420         if (ret < 0)
3421                 drm_dbg_kms(&i915->drm,
3422                             "Failed to %s sink decompression state\n",
3423                             enable ? "enable" : "disable");
3424 }
3425
3426 /* If the sink supports it, try to set the power state appropriately */
3427 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
3428 {
3429         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3430         int ret, i;
3431
3432         /* Should have a valid DPCD by this point */
3433         if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
3434                 return;
3435
3436         if (mode != DRM_MODE_DPMS_ON) {
3437                 if (downstream_hpd_needs_d0(intel_dp))
3438                         return;
3439
3440                 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
3441                                          DP_SET_POWER_D3);
3442         } else {
3443                 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
3444
3445                 /*
3446                  * When turning on, we need to retry for 1ms to give the sink
3447                  * time to wake up.
3448                  */
3449                 for (i = 0; i < 3; i++) {
3450                         ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
3451                                                  DP_SET_POWER_D0);
3452                         if (ret == 1)
3453                                 break;
3454                         msleep(1);
3455                 }
3456
3457                 if (ret == 1 && lspcon->active)
3458                         lspcon_wait_pcon_mode(lspcon);
3459         }
3460
3461         if (ret != 1)
3462                 drm_dbg_kms(&i915->drm, "failed to %s sink power state\n",
3463                             mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
3464 }
3465
3466 static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
3467                                  enum port port, enum pipe *pipe)
3468 {
3469         enum pipe p;
3470
3471         for_each_pipe(dev_priv, p) {
3472                 u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p));
3473
3474                 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
3475                         *pipe = p;
3476                         return true;
3477                 }
3478         }
3479
3480         drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n",
3481                     port_name(port));
3482
3483         /* must initialize pipe to something for the asserts */
3484         *pipe = PIPE_A;
3485
3486         return false;
3487 }
3488
3489 bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
3490                            i915_reg_t dp_reg, enum port port,
3491                            enum pipe *pipe)
3492 {
3493         bool ret;
3494         u32 val;
3495
3496         val = intel_de_read(dev_priv, dp_reg);
3497
3498         ret = val & DP_PORT_EN;
3499
3500         /* asserts want to know the pipe even if the port is disabled */
3501         if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3502                 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
3503         else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3504                 ret &= cpt_dp_port_selected(dev_priv, port, pipe);
3505         else if (IS_CHERRYVIEW(dev_priv))
3506                 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
3507         else
3508                 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
3509
3510         return ret;
3511 }
3512
3513 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
3514                                   enum pipe *pipe)
3515 {
3516         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3517         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
3518         intel_wakeref_t wakeref;
3519         bool ret;
3520
3521         wakeref = intel_display_power_get_if_enabled(dev_priv,
3522                                                      encoder->power_domain);
3523         if (!wakeref)
3524                 return false;
3525
3526         ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
3527                                     encoder->port, pipe);
3528
3529         intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
3530
3531         return ret;
3532 }
3533
/*
 * Read back the current hardware state of a g4x/ilk-style DP port into
 * pipe_config: output type, sync polarity, audio, color range, lane
 * count, link M/N and port clock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
                                struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        u32 tmp, flags = 0;
        enum port port = encoder->port;
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);

        if (encoder->type == INTEL_OUTPUT_EDP)
                pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
        else
                pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);

        tmp = intel_de_read(dev_priv, intel_dp->output_reg);

        pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

        /*
         * On CPT the sync polarity lives in the per-pipe transcoder DP
         * control register; elsewhere it is in the port register itself.
         */
        if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
                u32 trans_dp = intel_de_read(dev_priv,
                                             TRANS_DP_CTL(crtc->pipe));

                if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        } else {
                if (tmp & DP_SYNC_HS_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & DP_SYNC_VS_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        }

        pipe_config->hw.adjusted_mode.flags |= flags;

        if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
                pipe_config->limited_color_range = true;

        /* Lane count is encoded as (width - 1) in the port register. */
        pipe_config->lane_count =
                ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

        intel_dp_get_m_n(crtc, pipe_config);

        /* Port A derives its link rate from the eDP PLL frequency select. */
        if (port == PORT_A) {
                if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
                        pipe_config->port_clock = 162000;
                else
                        pipe_config->port_clock = 270000;
        }

        pipe_config->hw.adjusted_mode.crtc_clock =
                intel_dotclock_calculate(pipe_config->port_clock,
                                         &pipe_config->dp_m_n);

        if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
            pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
                /*
                 * This is a big fat ugly hack.
                 *
                 * Some machines in UEFI boot mode provide us a VBT that has 18
                 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
                 * unknown we fail to light up. Yet the same BIOS boots up with
                 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
                 * max, not what it tells us to use.
                 *
                 * Note: This will still be broken if the eDP panel is not lit
                 * up by the BIOS, and thus we can't get the mode at module
                 * load.
                 */
                drm_dbg_kms(&dev_priv->drm,
                            "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
                            pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
                dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
        }
}
3619
/*
 * Common DP disable: stop audio, then power down the panel in the
 * required order (backlight off, sink to D3, panel power off) with VDD
 * forced on across the sequence.
 */
static void intel_disable_dp(struct intel_atomic_state *state,
                             struct intel_encoder *encoder,
                             const struct intel_crtc_state *old_crtc_state,
                             const struct drm_connector_state *old_conn_state)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

        /* Force a fresh link training on the next enable. */
        intel_dp->link_trained = false;

        if (old_crtc_state->has_audio)
                intel_audio_codec_disable(encoder,
                                          old_crtc_state, old_conn_state);

        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
        intel_edp_panel_vdd_on(intel_dp);
        intel_edp_backlight_off(old_conn_state);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        intel_edp_panel_off(intel_dp);
}
3640
/* g4x encoder ->disable hook: just the common DP disable sequence. */
static void g4x_disable_dp(struct intel_atomic_state *state,
                           struct intel_encoder *encoder,
                           const struct intel_crtc_state *old_crtc_state,
                           const struct drm_connector_state *old_conn_state)
{
        intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}
3648
/* VLV encoder ->disable hook: just the common DP disable sequence. */
static void vlv_disable_dp(struct intel_atomic_state *state,
                           struct intel_encoder *encoder,
                           const struct intel_crtc_state *old_crtc_state,
                           const struct drm_connector_state *old_conn_state)
{
        intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
}
3656
/*
 * g4x encoder ->post_disable hook: take the link down after the pipe has
 * been disabled, then drop the eDP PLL on port A.
 */
static void g4x_post_disable_dp(struct intel_atomic_state *state,
                                struct intel_encoder *encoder,
                                const struct intel_crtc_state *old_crtc_state,
                                const struct drm_connector_state *old_conn_state)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        enum port port = encoder->port;

        /*
         * Bspec does not list a specific disable sequence for g4x DP.
         * Follow the ilk+ sequence (disable pipe before the port) for
         * g4x DP as it does not suffer from underruns like the normal
         * g4x modeset sequence (disable pipe after the port).
         */
        intel_dp_link_down(encoder, old_crtc_state);

        /* Only ilk+ has port A */
        if (port == PORT_A)
                ilk_edp_pll_off(intel_dp, old_crtc_state);
}
3677
/* VLV encoder ->post_disable hook: take the link down; no eDP PLL on VLV. */
static void vlv_post_disable_dp(struct intel_atomic_state *state,
                                struct intel_encoder *encoder,
                                const struct intel_crtc_state *old_crtc_state,
                                const struct drm_connector_state *old_conn_state)
{
        intel_dp_link_down(encoder, old_crtc_state);
}
3685
/*
 * CHV encoder ->post_disable hook: take the link down, then put the PHY
 * data lanes into soft reset via sideband (DPIO) access.
 */
static void chv_post_disable_dp(struct intel_atomic_state *state,
                                struct intel_encoder *encoder,
                                const struct intel_crtc_state *old_crtc_state,
                                const struct drm_connector_state *old_conn_state)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

        intel_dp_link_down(encoder, old_crtc_state);

        vlv_dpio_get(dev_priv);

        /* Assert data lane reset */
        chv_data_lane_soft_reset(encoder, old_crtc_state, true);

        vlv_dpio_put(dev_priv);
}
3702
3703 static void
3704 cpt_set_link_train(struct intel_dp *intel_dp,
3705                    u8 dp_train_pat)
3706 {
3707         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3708         u32 *DP = &intel_dp->DP;
3709
3710         *DP &= ~DP_LINK_TRAIN_MASK_CPT;
3711
3712         switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3713         case DP_TRAINING_PATTERN_DISABLE:
3714                 *DP |= DP_LINK_TRAIN_OFF_CPT;
3715                 break;
3716         case DP_TRAINING_PATTERN_1:
3717                 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
3718                 break;
3719         case DP_TRAINING_PATTERN_2:
3720                 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3721                 break;
3722         case DP_TRAINING_PATTERN_3:
3723                 drm_dbg_kms(&dev_priv->drm,
3724                             "TPS3 not supported, using TPS2 instead\n");
3725                 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3726                 break;
3727         }
3728
3729         intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
3730         intel_de_posting_read(dev_priv, intel_dp->output_reg);
3731 }
3732
3733 static void
3734 g4x_set_link_train(struct intel_dp *intel_dp,
3735                    u8 dp_train_pat)
3736 {
3737         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3738         u32 *DP = &intel_dp->DP;
3739
3740         *DP &= ~DP_LINK_TRAIN_MASK;
3741
3742         switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3743         case DP_TRAINING_PATTERN_DISABLE:
3744                 *DP |= DP_LINK_TRAIN_OFF;
3745                 break;
3746         case DP_TRAINING_PATTERN_1:
3747                 *DP |= DP_LINK_TRAIN_PAT_1;
3748                 break;
3749         case DP_TRAINING_PATTERN_2:
3750                 *DP |= DP_LINK_TRAIN_PAT_2;
3751                 break;
3752         case DP_TRAINING_PATTERN_3:
3753                 drm_dbg_kms(&dev_priv->drm,
3754                             "TPS3 not supported, using TPS2 instead\n");
3755                 *DP |= DP_LINK_TRAIN_PAT_2;
3756                 break;
3757         }
3758
3759         intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
3760         intel_de_posting_read(dev_priv, intel_dp->output_reg);
3761 }
3762
/*
 * Enable the DP port with training pattern 1 programmed, preserving the
 * two-step register write that VLV/CHV require.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp,
                                 const struct intel_crtc_state *old_crtc_state)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

        /* enable with pattern 1 (as per spec) */

        intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);

        /*
         * Magic for VLV/CHV. We _must_ first set up the register
         * without actually enabling the port, and then do another
         * write to enable the port. Otherwise link training will
         * fail when the power sequencer is freshly used for this port.
         */
        intel_dp->DP |= DP_PORT_EN;
        if (old_crtc_state->has_audio)
                intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

        intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
        intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
3785
/*
 * Common DP enable: bring up the port and panel under the PPS lock,
 * wait for the VLV/CHV PHY to be ready, wake the sink, train the link
 * and finally enable audio. Warns and bails if the port is already on.
 */
static void intel_enable_dp(struct intel_atomic_state *state,
                            struct intel_encoder *encoder,
                            const struct intel_crtc_state *pipe_config,
                            const struct drm_connector_state *conn_state)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
        u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
        enum pipe pipe = crtc->pipe;
        intel_wakeref_t wakeref;

        if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
                return;

        with_pps_lock(intel_dp, wakeref) {
                /* VLV/CHV need their power sequencer bound to this pipe first. */
                if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                        vlv_init_panel_power_sequencer(encoder, pipe_config);

                intel_dp_enable_port(intel_dp, pipe_config);

                /* Panel power on with VDD forced across the sequence. */
                edp_panel_vdd_on(intel_dp);
                edp_panel_on(intel_dp);
                edp_panel_vdd_off(intel_dp, true);
        }

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                unsigned int lane_mask = 0x0;

                if (IS_CHERRYVIEW(dev_priv))
                        lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);

                vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
                                    lane_mask);
        }

        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);

        if (pipe_config->has_audio) {
                drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n",
                        pipe_name(pipe));
                intel_audio_codec_enable(encoder, pipe_config, conn_state);
        }
}
3832
/*
 * g4x ->enable() hook: bring up the DP link and then turn on the
 * eDP backlight. Ordering matters: the link must be trained
 * (intel_enable_dp()) before the backlight is enabled.
 */
static void g4x_enable_dp(struct intel_atomic_state *state,
			  struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_enable_dp(state, encoder, pipe_config, conn_state);
	intel_edp_backlight_on(pipe_config, conn_state);
}
3841
/*
 * VLV/CHV ->enable() hook: only the backlight is handled here;
 * the port itself is already enabled from the ->pre_enable hook
 * (vlv_pre_enable_dp()/chv_pre_enable_dp() call intel_enable_dp()).
 */
static void vlv_enable_dp(struct intel_atomic_state *state,
			  struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_edp_backlight_on(pipe_config, conn_state);
}
3849
/*
 * g4x ->pre_enable() hook: program the port registers for the new
 * state and, for the CPU eDP port (port A), turn on the eDP PLL
 * before the port is enabled.
 */
static void g4x_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;

	intel_dp_prepare(encoder, pipe_config);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ilk_edp_pll_on(intel_dp, pipe_config);
}
3864
/*
 * Logically disconnect the panel power sequencer currently assigned
 * to this port (intel_dp->pps_pipe): sync off any pending VDD, clear
 * the PPS port select and mark the PPS as unassigned. Must only be
 * called while the port is inactive (active_pipe == INVALID_PIPE).
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);

	/* only pipe A/B power sequencers exist on VLV/CHV */
	if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
		return;

	/* make sure VDD is really off before we lose the PPS */
	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
		    pipe_name(pipe), dig_port->base.base.base.id,
		    dig_port->base.base.name);
	intel_de_write(dev_priv, pp_on_reg, 0);
	intel_de_posting_read(dev_priv, pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
3897
/*
 * Detach the power sequencer of @pipe from whichever DP encoder is
 * currently using it, so it can be reassigned to a new port. Warns
 * if the PPS is stolen from an encoder that is still active on that
 * pipe. Caller must hold pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
			 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
			 pipe_name(pipe), encoder->base.base.id,
			 encoder->base.name);

		/* only interested in the encoder holding this pipe's PPS */
		if (intel_dp->pps_pipe != pipe)
			continue;

		drm_dbg_kms(&dev_priv->drm,
			    "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
			    pipe_name(pipe), encoder->base.base.id,
			    encoder->base.name);

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
3925
/*
 * Assign the power sequencer of the crtc's pipe to this encoder:
 * release any PPS the port previously held, steal the target pipe's
 * PPS from other ports, and (for eDP only) claim and initialize it.
 * Caller must hold pps_mutex.
 */
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);

	if (intel_dp->pps_pipe != INVALID_PIPE &&
	    intel_dp->pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously make sure to turn off vdd there while
		 * we still have control of it.
		 */
		vlv_detach_power_sequencer(intel_dp);
	}

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev_priv, crtc->pipe);

	intel_dp->active_pipe = crtc->pipe;

	/* non-eDP ports don't need a dedicated PPS */
	if (!intel_dp_is_edp(intel_dp))
		return;

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
		    encoder->base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
}
3970
/*
 * VLV ->pre_enable() hook: prepare the PHY for the encoder and then
 * enable/train the DP link (unlike g4x, this happens in pre_enable).
 */
static void vlv_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	vlv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(state, encoder, pipe_config, conn_state);
}
3980
/*
 * VLV ->pre_pll_enable() hook: program the DP port registers and
 * do the PHY setup that must precede PLL enabling.
 */
static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state,
				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	vlv_phy_pre_pll_enable(encoder, pipe_config);
}
3990
/*
 * CHV ->pre_enable() hook: prepare the PHY, enable/train the DP
 * link, then drop the second common-lane override.
 */
static void chv_pre_enable_dp(struct intel_atomic_state *state,
			      struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	chv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(state, encoder, pipe_config, conn_state);

	/* Second common lane will stay alive on its own now */
	chv_phy_release_cl2_override(encoder);
}
4003
/*
 * CHV ->pre_pll_enable() hook: program the DP port registers and
 * do the PHY setup that must precede PLL enabling.
 */
static void chv_dp_pre_pll_enable(struct intel_atomic_state *state,
				  struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	chv_phy_pre_pll_enable(encoder, pipe_config);
}
4013
/*
 * CHV ->post_pll_disable() hook: PHY cleanup after the PLL has
 * been turned off.
 */
static void chv_dp_post_pll_disable(struct intel_atomic_state *state,
				    struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state)
{
	chv_phy_post_pll_disable(encoder, old_crtc_state);
}
4021
/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information
 *
 * Returns true if the full DP_LINK_STATUS_SIZE bytes were read,
 * false on a short or failed AUX transfer.
 */
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
{
	return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
				DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
4032
/* ->voltage_max vfunc for platforms capped at vswing level 2 */
static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp)
{
	return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
4037
/* ->voltage_max vfunc for platforms supporting the full vswing level 3 */
static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp)
{
	return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
}
4042
/* ->preemph_max vfunc for platforms capped at pre-emphasis level 2 */
static u8 intel_dp_pre_empemph_max_2(struct intel_dp *intel_dp)
{
	return DP_TRAIN_PRE_EMPH_LEVEL_2;
}
4047
/* ->preemph_max vfunc for platforms supporting the full pre-emphasis level 3 */
static u8 intel_dp_pre_empemph_max_3(struct intel_dp *intel_dp)
{
	return DP_TRAIN_PRE_EMPH_LEVEL_3;
}
4052
/*
 * Program the VLV PHY with the de-emphasis/pre-emphasis/transcale
 * values matching the requested training level in train_set[0].
 * The constants are platform tuning values; combinations not listed
 * in the tables are invalid and make the function bail out without
 * touching the PHY.
 */
static void vlv_set_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	u8 train_set = intel_dp->train_set[0];

	/* outer switch: pre-emphasis level, inner: voltage swing level */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return;
		}
		break;
	default:
		return;
	}

	vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
				 uniqtranscale_reg_value, 0);
}
4136
/*
 * Program the CHV PHY with the de-emphasis/margin values matching the
 * requested training level in train_set[0]. Only the highest-swing
 * level 3 @ pre-emph 0 combination enables the unique transition
 * scale. Invalid combinations bail out without touching the PHY.
 */
static void chv_set_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u32 deemph_reg_value, margin_reg_value;
	bool uniq_trans_scale = false;
	u8 train_set = intel_dp->train_set[0];

	/* outer switch: pre-emphasis level, inner: voltage swing level */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			uniq_trans_scale = true;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return;
		}
		break;
	default:
		return;
	}

	chv_set_phy_signal_level(encoder, deemph_reg_value,
				 margin_reg_value, uniq_trans_scale);
}
4217
4218 static u32 g4x_signal_levels(u8 train_set)
4219 {
4220         u32 signal_levels = 0;
4221
4222         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
4223         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
4224         default:
4225                 signal_levels |= DP_VOLTAGE_0_4;
4226                 break;
4227         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
4228                 signal_levels |= DP_VOLTAGE_0_6;
4229                 break;
4230         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
4231                 signal_levels |= DP_VOLTAGE_0_8;
4232                 break;
4233         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
4234                 signal_levels |= DP_VOLTAGE_1_2;
4235                 break;
4236         }
4237         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
4238         case DP_TRAIN_PRE_EMPH_LEVEL_0:
4239         default:
4240                 signal_levels |= DP_PRE_EMPHASIS_0;
4241                 break;
4242         case DP_TRAIN_PRE_EMPH_LEVEL_1:
4243                 signal_levels |= DP_PRE_EMPHASIS_3_5;
4244                 break;
4245         case DP_TRAIN_PRE_EMPH_LEVEL_2:
4246                 signal_levels |= DP_PRE_EMPHASIS_6;
4247                 break;
4248         case DP_TRAIN_PRE_EMPH_LEVEL_3:
4249                 signal_levels |= DP_PRE_EMPHASIS_9_5;
4250                 break;
4251         }
4252         return signal_levels;
4253 }
4254
/*
 * Write the g4x voltage swing / pre-emphasis bits for the current
 * training level into the cached DP register value and flush it to
 * the port register.
 */
static void
g4x_set_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	u32 signal_levels;

	signal_levels = g4x_signal_levels(train_set);

	drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
		    signal_levels);

	/* replace only the swing/pre-emphasis fields, keep the rest of DP */
	intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK);
	intel_dp->DP |= signal_levels;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
4273
/* SNB CPU eDP voltage swing and pre-emphasis control */
/*
 * Map the combined swing/pre-emphasis training level to the SNB CPU
 * eDP register encoding. Several DPCD levels share one hardware
 * setting; unsupported combinations log and fall back to the
 * 400/600mV 0dB value.
 */
static u32 snb_cpu_edp_signal_levels(u8 train_set)
{
	u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					DP_TRAIN_PRE_EMPHASIS_MASK);

	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
4301
/*
 * Write the SNB CPU eDP voltage swing / pre-emphasis field for the
 * current training level into the cached DP register value and
 * flush it to the port register.
 */
static void
snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	u32 signal_levels;

	signal_levels = snb_cpu_edp_signal_levels(train_set);

	drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
		    signal_levels);

	/* replace only the vswing/emphasis field, keep the rest of DP */
	intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	intel_dp->DP |= signal_levels;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
4320
/* IVB CPU eDP voltage swing and pre-emphasis control */
/*
 * Map the combined swing/pre-emphasis training level to the IVB CPU
 * eDP register encoding. Unsupported combinations log and fall back
 * to the 500mV 0dB value.
 */
static u32 ivb_cpu_edp_signal_levels(u8 train_set)
{
	u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					DP_TRAIN_PRE_EMPHASIS_MASK);

	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
4352
/*
 * Write the IVB CPU eDP voltage swing / pre-emphasis field for the
 * current training level into the cached DP register value and
 * flush it to the port register.
 */
static void
ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];
	u32 signal_levels;

	signal_levels = ivb_cpu_edp_signal_levels(train_set);

	drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
		    signal_levels);

	/* replace only the vswing/emphasis field, keep the rest of DP */
	intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	intel_dp->DP |= signal_levels;

	intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
4371
/*
 * Apply the current training signal levels (train_set[0]) to the
 * hardware via the platform-specific ->set_signal_levels() vfunc,
 * logging the selected vswing/pre-emphasis level and whether the
 * max-reached flags are set.
 */
void intel_dp_set_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_set = intel_dp->train_set[0];

	drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
		    train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
		    train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
	drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
		    (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
		    DP_TRAIN_PRE_EMPHASIS_SHIFT,
		    train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
		    " (max)" : "");

	intel_dp->set_signal_levels(intel_dp);
}
4388
/*
 * Program the given link training pattern into the source via the
 * platform-specific ->set_link_train() vfunc. Only logs the TPS
 * number when the pattern bits are within the sink-supported mask.
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       u8 dp_train_pat)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);

	if (dp_train_pat & train_pat_mask)
		drm_dbg_kms(&dev_priv->drm,
			    "Using DP training pattern TPS%d\n",
			    dp_train_pat & train_pat_mask);

	intel_dp->set_link_train(intel_dp, dp_train_pat);
}
4403
/*
 * Switch the port to the idle link training pattern, if the
 * platform provides the optional ->set_idle_link_train() vfunc.
 */
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	if (intel_dp->set_idle_link_train)
		intel_dp->set_idle_link_train(intel_dp);
}
4409
/*
 * Tear down the DP link: switch the port to the idle training
 * pattern, disable the port (and audio), apply the IBX transcoder-A
 * workaround if needed, and wait out the panel power-down delay.
 * Warns and bails if the port is already disabled.
 */
static void
intel_dp_link_down(struct intel_encoder *encoder,
		   const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	enum port port = encoder->port;
	u32 DP = intel_dp->DP;

	if (drm_WARN_ON(&dev_priv->drm,
			(intel_de_read(dev_priv, intel_dp->output_reg) &
			 DP_PORT_EN) == 0))
		return;

	drm_dbg_kms(&dev_priv->drm, "\n");

	/* put the port into the idle pattern before disabling it;
	 * CPT and IVB port A use the CPT register layout */
	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	/* now actually turn off the port and audio output */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_de_write(dev_priv, intel_dp->output_reg, DP);
	intel_de_posting_read(dev_priv, intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
			DP_LINK_TRAIN_PAT_1;
		intel_de_write(dev_priv, intel_dp->output_reg, DP);
		intel_de_posting_read(dev_priv, intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		intel_de_write(dev_priv, intel_dp->output_reg, DP);
		intel_de_posting_read(dev_priv, intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;

	/* on VLV/CHV the port no longer owns a pipe */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t wakeref;

		with_pps_lock(intel_dp, wakeref)
			intel_dp->active_pipe = INVALID_PIPE;
	}
}
4482
/*
 * Query DP_DPRX_FEATURE_ENUMERATION_LIST to see whether the sink
 * supports VSC SDP extension for colorimetry. Returns false on a
 * failed DPCD read.
 */
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
	u8 dprx = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
			      &dprx) != 1)
		return false;
	return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}
4492
/*
 * Read and cache the sink's DSC capability registers (and, for
 * non-eDP sinks, the FEC capability byte) when the sink is eDP 1.4+
 * or DPCD rev >= 1.4. Caches are cleared first so stale values are
 * never used for sinks without DSC support.
 */
static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
	 */
	memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

	/* Clear fec_capable to avoid using stale values */
	intel_dp->fec_capable = 0;

	/* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
	    intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
				     intel_dp->dsc_dpcd,
				     sizeof(intel_dp->dsc_dpcd)) < 0)
			drm_err(&i915->drm,
				"Failed to read DPCD register 0x%x\n",
				DP_DSC_SUPPORT);

		drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n",
			    (int)sizeof(intel_dp->dsc_dpcd),
			    intel_dp->dsc_dpcd);

		/* FEC is supported only on DP 1.4 */
		if (!intel_dp_is_edp(intel_dp) &&
		    drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
				      &intel_dp->fec_capable) < 0)
			drm_err(&i915->drm,
				"Failed to read FEC DPCD register\n");

		drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
			    intel_dp->fec_capable);
	}
}
4531
/*
 * Read and cache the eDP sink's DPCD state at init time: base caps,
 * branch descriptor, eDP display control registers, PSR caps, the
 * sink/common link rate tables and (gen10+/GLK) the DSC caps.
 *
 * Returns false only if the base DPCD capability read fails.
 */
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
			     sizeof(intel_dp->edp_dpcd))
		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
			    (int)sizeof(intel_dp->edp_dpcd),
			    intel_dp->edp_dpcd);

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				sink_rates, sizeof(sink_rates));

		/* The rate table is zero-terminated; stop at the first 0. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		intel_dp_get_dsc_sink_cap(intel_dp);

	return true;
}
4611
4612 static bool
4613 intel_dp_has_sink_count(struct intel_dp *intel_dp)
4614 {
4615         if (!intel_dp->attached_connector)
4616                 return false;
4617
4618         return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
4619                                           intel_dp->dpcd,
4620                                           &intel_dp->desc);
4621 }
4622
4623 static bool
4624 intel_dp_get_dpcd(struct intel_dp *intel_dp)
4625 {
4626         int ret;
4627
4628         if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd))
4629                 return false;
4630
4631         /*
4632          * Don't clobber cached eDP rates. Also skip re-reading
4633          * the OUI/ID since we know it won't change.
4634          */
4635         if (!intel_dp_is_edp(intel_dp)) {
4636                 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
4637                                  drm_dp_is_branch(intel_dp->dpcd));
4638
4639                 intel_dp_set_sink_rates(intel_dp);
4640                 intel_dp_set_common_rates(intel_dp);
4641         }
4642
4643         if (intel_dp_has_sink_count(intel_dp)) {
4644                 ret = drm_dp_read_sink_count(&intel_dp->aux);
4645                 if (ret < 0)
4646                         return false;
4647
4648                 /*
4649                  * Sink count can change between short pulse hpd hence
4650                  * a member variable in intel_dp will track any changes
4651                  * between short pulse interrupts.
4652                  */
4653                 intel_dp->sink_count = ret;
4654
4655                 /*
4656                  * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
4657                  * a dongle is present but no display. Unless we require to know
4658                  * if a dongle is present or not, we don't need to update
4659                  * downstream port information. So, an early return here saves
4660                  * time from performing other operations which are not required.
4661                  */
4662                 if (!intel_dp->sink_count)
4663                         return false;
4664         }
4665
4666         return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd,
4667                                            intel_dp->downstream_ports) == 0;
4668 }
4669
4670 static bool
4671 intel_dp_can_mst(struct intel_dp *intel_dp)
4672 {
4673         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4674
4675         return i915->params.enable_dp_mst &&
4676                 intel_dp->can_mst &&
4677                 drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
4678 }
4679
4680 static void
4681 intel_dp_configure_mst(struct intel_dp *intel_dp)
4682 {
4683         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4684         struct intel_encoder *encoder =
4685                 &dp_to_dig_port(intel_dp)->base;
4686         bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
4687
4688         drm_dbg_kms(&i915->drm,
4689                     "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
4690                     encoder->base.base.id, encoder->base.name,
4691                     yesno(intel_dp->can_mst), yesno(sink_can_mst),
4692                     yesno(i915->params.enable_dp_mst));
4693
4694         if (!intel_dp->can_mst)
4695                 return;
4696
4697         intel_dp->is_mst = sink_can_mst &&
4698                 i915->params.enable_dp_mst;
4699
4700         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4701                                         intel_dp->is_mst);
4702 }
4703
4704 static bool
4705 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4706 {
4707         return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
4708                                 sink_irq_vector, DP_DPRX_ESI_LEN) ==
4709                 DP_DPRX_ESI_LEN;
4710 }
4711
4712 bool
4713 intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
4714                        const struct drm_connector_state *conn_state)
4715 {
4716         /*
4717          * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
4718          * of Color Encoding Format and Content Color Gamut], in order to
4719          * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
4720          */
4721         if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
4722                 return true;
4723
4724         switch (conn_state->colorspace) {
4725         case DRM_MODE_COLORIMETRY_SYCC_601:
4726         case DRM_MODE_COLORIMETRY_OPYCC_601:
4727         case DRM_MODE_COLORIMETRY_BT2020_YCC:
4728         case DRM_MODE_COLORIMETRY_BT2020_RGB:
4729         case DRM_MODE_COLORIMETRY_BT2020_CYCC:
4730                 return true;
4731         default:
4732                 break;
4733         }
4734
4735         return false;
4736 }
4737
4738 static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
4739                                      struct dp_sdp *sdp, size_t size)
4740 {
4741         size_t length = sizeof(struct dp_sdp);
4742
4743         if (size < length)
4744                 return -ENOSPC;
4745
4746         memset(sdp, 0, size);
4747
4748         /*
4749          * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
4750          * VSC SDP Header Bytes
4751          */
4752         sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
4753         sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
4754         sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
4755         sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */
4756
4757         /*
4758          * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
4759          * per DP 1.4a spec.
4760          */
4761         if (vsc->revision != 0x5)
4762                 goto out;
4763
4764         /* VSC SDP Payload for DB16 through DB18 */
4765         /* Pixel Encoding and Colorimetry Formats  */
4766         sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
4767         sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */
4768
4769         switch (vsc->bpc) {
4770         case 6:
4771                 /* 6bpc: 0x0 */
4772                 break;
4773         case 8:
4774                 sdp->db[17] = 0x1; /* DB17[3:0] */
4775                 break;
4776         case 10:
4777                 sdp->db[17] = 0x2;
4778                 break;
4779         case 12:
4780                 sdp->db[17] = 0x3;
4781                 break;
4782         case 16:
4783                 sdp->db[17] = 0x4;
4784                 break;
4785         default:
4786                 MISSING_CASE(vsc->bpc);
4787                 break;
4788         }
4789         /* Dynamic Range and Component Bit Depth */
4790         if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
4791                 sdp->db[17] |= 0x80;  /* DB17[7] */
4792
4793         /* Content Type */
4794         sdp->db[18] = vsc->content_type & 0x7;
4795
4796 out:
4797         return length;
4798 }
4799
4800 static ssize_t
4801 intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
4802                                          struct dp_sdp *sdp,
4803                                          size_t size)
4804 {
4805         size_t length = sizeof(struct dp_sdp);
4806         const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
4807         unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
4808         ssize_t len;
4809
4810         if (size < length)
4811                 return -ENOSPC;
4812
4813         memset(sdp, 0, size);
4814
4815         len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
4816         if (len < 0) {
4817                 DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
4818                 return -ENOSPC;
4819         }
4820
4821         if (len != infoframe_size) {
4822                 DRM_DEBUG_KMS("wrong static hdr metadata size\n");
4823                 return -ENOSPC;
4824         }
4825
4826         /*
4827          * Set up the infoframe sdp packet for HDR static metadata.
4828          * Prepare VSC Header for SU as per DP 1.4a spec,
4829          * Table 2-100 and Table 2-101
4830          */
4831
4832         /* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
4833         sdp->sdp_header.HB0 = 0;
4834         /*
4835          * Packet Type 80h + Non-audio INFOFRAME Type value
4836          * HDMI_INFOFRAME_TYPE_DRM: 0x87
4837          * - 80h + Non-audio INFOFRAME Type value
4838          * - InfoFrame Type: 0x07
4839          *    [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
4840          */
4841         sdp->sdp_header.HB1 = drm_infoframe->type;
4842         /*
4843          * Least Significant Eight Bits of (Data Byte Count – 1)
4844          * infoframe_size - 1
4845          */
4846         sdp->sdp_header.HB2 = 0x1D;
4847         /* INFOFRAME SDP Version Number */
4848         sdp->sdp_header.HB3 = (0x13 << 2);
4849         /* CTA Header Byte 2 (INFOFRAME Version Number) */
4850         sdp->db[0] = drm_infoframe->version;
4851         /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
4852         sdp->db[1] = drm_infoframe->length;
4853         /*
4854          * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
4855          * HDMI_INFOFRAME_HEADER_SIZE
4856          */
4857         BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
4858         memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
4859                HDMI_DRM_INFOFRAME_SIZE);
4860
4861         /*
4862          * Size of DP infoframe sdp packet for HDR static metadata consists of
4863          * - DP SDP Header(struct dp_sdp_header): 4 bytes
4864          * - Two Data Blocks: 2 bytes
4865          *    CTA Header Byte2 (INFOFRAME Version Number)
4866          *    CTA Header Byte3 (Length of INFOFRAME)
4867          * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
4868          *
4869          * Prior to GEN11's GMP register size is identical to DP HDR static metadata
4870          * infoframe size. But GEN11+ has larger than that size, write_infoframe
4871          * will pad rest of the size.
4872          */
4873         return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
4874 }
4875
4876 static void intel_write_dp_sdp(struct intel_encoder *encoder,
4877                                const struct intel_crtc_state *crtc_state,
4878                                unsigned int type)
4879 {
4880         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
4881         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4882         struct dp_sdp sdp = {};
4883         ssize_t len;
4884
4885         if ((crtc_state->infoframes.enable &
4886              intel_hdmi_infoframe_enable(type)) == 0)
4887                 return;
4888
4889         switch (type) {
4890         case DP_SDP_VSC:
4891                 len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
4892                                             sizeof(sdp));
4893                 break;
4894         case HDMI_PACKET_TYPE_GAMUT_METADATA:
4895                 len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm,
4896                                                                &sdp, sizeof(sdp));
4897                 break;
4898         default:
4899                 MISSING_CASE(type);
4900                 return;
4901         }
4902
4903         if (drm_WARN_ON(&dev_priv->drm, len < 0))
4904                 return;
4905
4906         dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
4907 }
4908
4909 void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
4910                             const struct intel_crtc_state *crtc_state,
4911                             struct drm_dp_vsc_sdp *vsc)
4912 {
4913         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
4914         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4915         struct dp_sdp sdp = {};
4916         ssize_t len;
4917
4918         len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));
4919
4920         if (drm_WARN_ON(&dev_priv->drm, len < 0))
4921                 return;
4922
4923         dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
4924                                         &sdp, len);
4925 }
4926
4927 void intel_dp_set_infoframes(struct intel_encoder *encoder,
4928                              bool enable,
4929                              const struct intel_crtc_state *crtc_state,
4930                              const struct drm_connector_state *conn_state)
4931 {
4932         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4933         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4934         i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
4935         u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
4936                          VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
4937                          VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
4938         u32 val = intel_de_read(dev_priv, reg);
4939
4940         /* TODO: Add DSC case (DIP_ENABLE_PPS) */
4941         /* When PSR is enabled, this routine doesn't disable VSC DIP */
4942         if (intel_psr_enabled(intel_dp))
4943                 val &= ~dip_enable;
4944         else
4945                 val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW);
4946
4947         if (!enable) {
4948                 intel_de_write(dev_priv, reg, val);
4949                 intel_de_posting_read(dev_priv, reg);
4950                 return;
4951         }
4952
4953         intel_de_write(dev_priv, reg, val);
4954         intel_de_posting_read(dev_priv, reg);
4955
4956         /* When PSR is enabled, VSC SDP is handled by PSR routine */
4957         if (!intel_psr_enabled(intel_dp))
4958                 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);
4959
4960         intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
4961 }
4962
4963 static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
4964                                    const void *buffer, size_t size)
4965 {
4966         const struct dp_sdp *sdp = buffer;
4967
4968         if (size < sizeof(struct dp_sdp))
4969                 return -EINVAL;
4970
4971         memset(vsc, 0, size);
4972
4973         if (sdp->sdp_header.HB0 != 0)
4974                 return -EINVAL;
4975
4976         if (sdp->sdp_header.HB1 != DP_SDP_VSC)
4977                 return -EINVAL;
4978
4979         vsc->sdp_type = sdp->sdp_header.HB1;
4980         vsc->revision = sdp->sdp_header.HB2;
4981         vsc->length = sdp->sdp_header.HB3;
4982
4983         if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
4984             (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
4985                 /*
4986                  * - HB2 = 0x2, HB3 = 0x8
4987                  *   VSC SDP supporting 3D stereo + PSR
4988                  * - HB2 = 0x4, HB3 = 0xe
4989                  *   VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
4990                  *   first scan line of the SU region (applies to eDP v1.4b
4991                  *   and higher).
4992                  */
4993                 return 0;
4994         } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
4995                 /*
4996                  * - HB2 = 0x5, HB3 = 0x13
4997                  *   VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
4998                  *   Format.
4999                  */
5000                 vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
5001                 vsc->colorimetry = sdp->db[16] & 0xf;
5002                 vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;
5003
5004                 switch (sdp->db[17] & 0x7) {
5005                 case 0x0:
5006                         vsc->bpc = 6;
5007                         break;
5008                 case 0x1:
5009                         vsc->bpc = 8;
5010                         break;
5011                 case 0x2:
5012                         vsc->bpc = 10;
5013                         break;
5014                 case 0x3:
5015                         vsc->bpc = 12;
5016                         break;
5017                 case 0x4:
5018                         vsc->bpc = 16;
5019                         break;
5020                 default:
5021                         MISSING_CASE(sdp->db[17] & 0x7);
5022                         return -EINVAL;
5023                 }
5024
5025                 vsc->content_type = sdp->db[18] & 0x7;
5026         } else {
5027                 return -EINVAL;
5028         }
5029
5030         return 0;
5031 }
5032
5033 static int
5034 intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
5035                                            const void *buffer, size_t size)
5036 {
5037         int ret;
5038
5039         const struct dp_sdp *sdp = buffer;
5040
5041         if (size < sizeof(struct dp_sdp))
5042                 return -EINVAL;
5043
5044         if (sdp->sdp_header.HB0 != 0)
5045                 return -EINVAL;
5046
5047         if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
5048                 return -EINVAL;
5049
5050         /*
5051          * Least Significant Eight Bits of (Data Byte Count – 1)
5052          * 1Dh (i.e., Data Byte Count = 30 bytes).
5053          */
5054         if (sdp->sdp_header.HB2 != 0x1D)
5055                 return -EINVAL;
5056
5057         /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */
5058         if ((sdp->sdp_header.HB3 & 0x3) != 0)
5059                 return -EINVAL;
5060
5061         /* INFOFRAME SDP Version Number */
5062         if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
5063                 return -EINVAL;
5064
5065         /* CTA Header Byte 2 (INFOFRAME Version Number) */
5066         if (sdp->db[0] != 1)
5067                 return -EINVAL;
5068
5069         /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
5070         if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
5071                 return -EINVAL;
5072
5073         ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
5074                                              HDMI_DRM_INFOFRAME_SIZE);
5075
5076         return ret;
5077 }
5078
5079 static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
5080                                   struct intel_crtc_state *crtc_state,
5081                                   struct drm_dp_vsc_sdp *vsc)
5082 {
5083         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
5084         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
5085         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5086         unsigned int type = DP_SDP_VSC;
5087         struct dp_sdp sdp = {};
5088         int ret;
5089
5090         /* When PSR is enabled, VSC SDP is handled by PSR routine */
5091         if (intel_psr_enabled(intel_dp))
5092                 return;
5093
5094         if ((crtc_state->infoframes.enable &
5095              intel_hdmi_infoframe_enable(type)) == 0)
5096                 return;
5097
5098         dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));
5099
5100         ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));
5101
5102         if (ret)
5103                 drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
5104 }
5105
5106 static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
5107                                                      struct intel_crtc_state *crtc_state,
5108                                                      struct hdmi_drm_infoframe *drm_infoframe)
5109 {
5110         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
5111         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5112         unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
5113         struct dp_sdp sdp = {};
5114         int ret;
5115
5116         if ((crtc_state->infoframes.enable &
5117             intel_hdmi_infoframe_enable(type)) == 0)
5118                 return;
5119
5120         dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
5121                                  sizeof(sdp));
5122
5123         ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
5124                                                          sizeof(sdp));
5125
5126         if (ret)
5127                 drm_dbg_kms(&dev_priv->drm,
5128                             "Failed to unpack DP HDR Metadata Infoframe SDP\n");
5129 }
5130
5131 void intel_read_dp_sdp(struct intel_encoder *encoder,
5132                        struct intel_crtc_state *crtc_state,
5133                        unsigned int type)
5134 {
5135         if (encoder->type != INTEL_OUTPUT_DDI)
5136                 return;
5137
5138         switch (type) {
5139         case DP_SDP_VSC:
5140                 intel_read_dp_vsc_sdp(encoder, crtc_state,
5141                                       &crtc_state->infoframes.vsc);
5142                 break;
5143         case HDMI_PACKET_TYPE_GAMUT_METADATA:
5144                 intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
5145                                                          &crtc_state->infoframes.drm.drm);
5146                 break;
5147         default:
5148                 MISSING_CASE(type);
5149                 break;
5150         }
5151 }
5152
5153 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
5154 {
5155         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5156         int status = 0;
5157         int test_link_rate;
5158         u8 test_lane_count, test_link_bw;
5159         /* (DP CTS 1.2)
5160          * 4.3.1.11
5161          */
5162         /* Read the TEST_LANE_COUNT and TEST_LINK_RTAE fields (DP CTS 3.1.4) */
5163         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
5164                                    &test_lane_count);
5165
5166         if (status <= 0) {
5167                 drm_dbg_kms(&i915->drm, "Lane count read failed\n");
5168                 return DP_TEST_NAK;
5169         }
5170         test_lane_count &= DP_MAX_LANE_COUNT_MASK;
5171
5172         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
5173                                    &test_link_bw);
5174         if (status <= 0) {
5175                 drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
5176                 return DP_TEST_NAK;
5177         }
5178         test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
5179
5180         /* Validate the requested link rate and lane count */
5181         if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
5182                                         test_lane_count))
5183                 return DP_TEST_NAK;
5184
5185         intel_dp->compliance.test_lane_count = test_lane_count;
5186         intel_dp->compliance.test_link_rate = test_link_rate;
5187
5188         return DP_TEST_ACK;
5189 }
5190
5191 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
5192 {
5193         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5194         u8 test_pattern;
5195         u8 test_misc;
5196         __be16 h_width, v_height;
5197         int status = 0;
5198
5199         /* Read the TEST_PATTERN (DP CTS 3.1.5) */
5200         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
5201                                    &test_pattern);
5202         if (status <= 0) {
5203                 drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
5204                 return DP_TEST_NAK;
5205         }
5206         if (test_pattern != DP_COLOR_RAMP)
5207                 return DP_TEST_NAK;
5208
5209         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
5210                                   &h_width, 2);
5211         if (status <= 0) {
5212                 drm_dbg_kms(&i915->drm, "H Width read failed\n");
5213                 return DP_TEST_NAK;
5214         }
5215
5216         status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
5217                                   &v_height, 2);
5218         if (status <= 0) {
5219                 drm_dbg_kms(&i915->drm, "V Height read failed\n");
5220                 return DP_TEST_NAK;
5221         }
5222
5223         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
5224                                    &test_misc);
5225         if (status <= 0) {
5226                 drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
5227                 return DP_TEST_NAK;
5228         }
5229         if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
5230                 return DP_TEST_NAK;
5231         if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
5232                 return DP_TEST_NAK;
5233         switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
5234         case DP_TEST_BIT_DEPTH_6:
5235                 intel_dp->compliance.test_data.bpc = 6;
5236                 break;
5237         case DP_TEST_BIT_DEPTH_8:
5238                 intel_dp->compliance.test_data.bpc = 8;
5239                 break;
5240         default:
5241                 return DP_TEST_NAK;
5242         }
5243
5244         intel_dp->compliance.test_data.video_pattern = test_pattern;
5245         intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
5246         intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
5247         /* Set test active flag here so userspace doesn't interrupt things */
5248         intel_dp->compliance.test_active = true;
5249
5250         return DP_TEST_ACK;
5251 }
5252
5253 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
5254 {
5255         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5256         u8 test_result = DP_TEST_ACK;
5257         struct intel_connector *intel_connector = intel_dp->attached_connector;
5258         struct drm_connector *connector = &intel_connector->base;
5259
5260         if (intel_connector->detect_edid == NULL ||
5261             connector->edid_corrupt ||
5262             intel_dp->aux.i2c_defer_count > 6) {
5263                 /* Check EDID read for NACKs, DEFERs and corruption
5264                  * (DP CTS 1.2 Core r1.1)
5265                  *    4.2.2.4 : Failed EDID read, I2C_NAK
5266                  *    4.2.2.5 : Failed EDID read, I2C_DEFER
5267                  *    4.2.2.6 : EDID corruption detected
5268                  * Use failsafe mode for all cases
5269                  */
5270                 if (intel_dp->aux.i2c_nack_count > 0 ||
5271                         intel_dp->aux.i2c_defer_count > 0)
5272                         drm_dbg_kms(&i915->drm,
5273                                     "EDID read had %d NACKs, %d DEFERs\n",
5274                                     intel_dp->aux.i2c_nack_count,
5275                                     intel_dp->aux.i2c_defer_count);
5276                 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
5277         } else {
5278                 struct edid *block = intel_connector->detect_edid;
5279
5280                 /* We have to write the checksum
5281                  * of the last block read
5282                  */
5283                 block += intel_connector->detect_edid->extensions;
5284
5285                 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
5286                                        block->checksum) <= 0)
5287                         drm_dbg_kms(&i915->drm,
5288                                     "Failed to write EDID checksum\n");
5289
5290                 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
5291                 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
5292         }
5293
5294         /* Set test active flag here so userspace doesn't interrupt things */
5295         intel_dp->compliance.test_active = true;
5296
5297         return test_result;
5298 }
5299
5300 static u8 intel_dp_prepare_phytest(struct intel_dp *intel_dp)
5301 {
5302         struct drm_dp_phy_test_params *data =
5303                 &intel_dp->compliance.test_data.phytest;
5304
5305         if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
5306                 DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n");
5307                 return DP_TEST_NAK;
5308         }
5309
5310         /*
5311          * link_mst is set to false to avoid executing mst related code
5312          * during compliance testing.
5313          */
5314         intel_dp->link_mst = false;
5315
5316         return DP_TEST_ACK;
5317 }
5318
/*
 * Program the source-side DDI compliance pattern generator
 * (DDI_DP_COMP_CTL / DDI_DP_COMP_PAT) for the PHY test pattern
 * requested in intel_dp->compliance.test_data.phytest.
 */
static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
			to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_dp_phy_test_params *data =
			&intel_dp->compliance.test_data.phytest;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 pattern_val;

	switch (data->phy_pattern) {
	case DP_PHY_TEST_PATTERN_NONE:
		DRM_DEBUG_KMS("Disable Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
		break;
	case DP_PHY_TEST_PATTERN_D10_2:
		DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
		break;
	case DP_HY_TEST_PATTERN_ERROR_COUNT:
		DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_SCRAMBLED_0);
		break;
	case DP_PHY_TEST_PATTERN_PRBS7:
		DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n");
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
		break;
	case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x250. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
		pattern_val = 0x3e0f83e0;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
		pattern_val = 0x0f83e0f8;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
		pattern_val = 0x0000f83e;
		intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE |
			       DDI_DP_COMP_CTL_CUSTOM80);
		break;
	case DP_PHY_TEST_PATTERN_CP2520:
		/*
		 * FIXME: Ideally pattern should come from DPCD 0x24A. As
		 * current firmware of DPR-100 could not set it, so hardcoding
		 * now for compliance test.
		 */
		DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n");
		pattern_val = 0xFB;
		intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
			       DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
			       pattern_val);
		break;
	default:
		WARN(1, "Invalid Phy Test Pattern\n");
	}
}
5384
/*
 * Tear down the output path before programming a PHY compliance
 * pattern: clear the enable bits in TRANS_DDI_FUNC_CTL, PIPECONF and
 * TGL_DP_TP_CTL via read-modify-write, preserving the rest of each
 * register so intel_dp_autotest_phy_ddi_enable() can restore the path.
 */
static void
intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;

	trans_ddi_func_ctl_value = intel_de_read(dev_priv,
						 TRANS_DDI_FUNC_CTL(pipe));
	trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
	dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));

	/* Besides disabling, also detach the DDI from its port */
	trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
				      TGL_TRANS_DDI_PORT_MASK);
	trans_conf_value &= ~PIPECONF_ENABLE;
	dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;

	intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
		       trans_ddi_func_ctl_value);
	intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
}
5410
/*
 * Re-enable the output path after the PHY test pattern has been
 * programmed: set the enable bits cleared by
 * intel_dp_autotest_phy_ddi_disable() and reattach the DDI to its port.
 *
 * NOTE(review): @lane_cnt is currently unused in this function;
 * presumably lane configuration is carried by the DPCD writes done in
 * drm_dp_set_phy_test_pattern() — confirm before removing it.
 */
static void
intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, uint8_t lane_cnt)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = dig_port->base.port;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;

	trans_ddi_func_ctl_value = intel_de_read(dev_priv,
						 TRANS_DDI_FUNC_CTL(pipe));
	trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
	dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));

	trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
				    TGL_TRANS_DDI_SELECT_PORT(port);
	trans_conf_value |= PIPECONF_ENABLE;
	dp_tp_ctl_value |= DP_TP_CTL_ENABLE;

	intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
	intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
		       trans_ddi_func_ctl_value);
}
5437
/*
 * Execute a pending PHY compliance test request:
 *  1. read the current link status over AUX,
 *  2. adopt the sink-requested vswing/pre-emphasis levels,
 *  3. disable the output, program the pattern generator, re-enable,
 *  4. mirror the pattern selection back to the sink via DPCD.
 * The step ordering matters: the output must be down while the
 * compliance pattern registers are written.
 */
void intel_dp_process_phy_request(struct intel_dp *intel_dp)
{
	struct drm_dp_phy_test_params *data =
		&intel_dp->compliance.test_data.phytest;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		DRM_DEBUG_KMS("failed to get link status\n");
		return;
	}

	/* retrieve vswing & pre-emphasis setting */
	intel_dp_get_adjust_train(intel_dp, link_status);

	intel_dp_autotest_phy_ddi_disable(intel_dp);

	intel_dp_set_signal_levels(intel_dp);

	intel_dp_phy_pattern_update(intel_dp);

	intel_dp_autotest_phy_ddi_enable(intel_dp, data->num_lanes);

	drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
				    link_status[DP_DPCD_REV]);
}
5463
5464 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
5465 {
5466         u8 test_result;
5467
5468         test_result = intel_dp_prepare_phytest(intel_dp);
5469         if (test_result != DP_TEST_ACK)
5470                 DRM_ERROR("Phy test preparation failed\n");
5471
5472         intel_dp_process_phy_request(intel_dp);
5473
5474         return test_result;
5475 }
5476
/*
 * Service DP_AUTOMATED_TEST_REQUEST: read the requested test from
 * DP_TEST_REQUEST, dispatch to the matching autotest handler, and
 * report the result back to the sink via DP_TEST_RESPONSE. On ACK the
 * test type is recorded in intel_dp->compliance.test_type.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 response = DP_TEST_NAK;
	u8 request = 0;
	int status;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm,
			    "Could not read test request from sink\n");
		goto update_status;
	}

	switch (request) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		drm_dbg_kms(&i915->drm, "EDID test requested\n");
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
			    request);
		break;
	}

	if (response & DP_TEST_ACK)
		intel_dp->compliance.test_type = request;

update_status:
	/* The NAK is written back even when the request couldn't be read */
	status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
	if (status <= 0)
		drm_dbg_kms(&i915->drm,
			    "Could not write test response to sink\n");
}
5523
5524 /**
5525  * intel_dp_check_mst_status - service any pending MST interrupts, check link status
5526  * @intel_dp: Intel DP struct
5527  *
5528  * Read any pending MST interrupts, call MST core to handle these and ack the
5529  * interrupts. Check if the main and AUX link state is ok.
5530  *
5531  * Returns:
5532  * - %true if pending interrupts were serviced (or no interrupts were
5533  *   pending) w/o detecting an error condition.
5534  * - %false if an error condition - like AUX failure or a loss of link - is
5535  *   detected, which needs servicing from the hotplug work.
5536  */
5537 static bool
5538 intel_dp_check_mst_status(struct intel_dp *intel_dp)
5539 {
5540         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5541         bool link_ok = true;
5542
5543         drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
5544
5545         for (;;) {
5546                 u8 esi[DP_DPRX_ESI_LEN] = {};
5547                 bool handled;
5548                 int retry;
5549
5550                 if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
5551                         drm_dbg_kms(&i915->drm,
5552                                     "failed to get ESI - device may have failed\n");
5553                         link_ok = false;
5554
5555                         break;
5556                 }
5557
5558                 /* check link status - esi[10] = 0x200c */
5559                 if (intel_dp->active_mst_links > 0 && link_ok &&
5560                     !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
5561                         drm_dbg_kms(&i915->drm,
5562                                     "channel EQ not ok, retraining\n");
5563                         link_ok = false;
5564                 }
5565
5566                 drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);
5567
5568                 drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
5569                 if (!handled)
5570                         break;
5571
5572                 for (retry = 0; retry < 3; retry++) {
5573                         int wret;
5574
5575                         wret = drm_dp_dpcd_write(&intel_dp->aux,
5576                                                  DP_SINK_COUNT_ESI+1,
5577                                                  &esi[1], 3);
5578                         if (wret == 3)
5579                                 break;
5580                 }
5581         }
5582
5583         return link_ok;
5584 }
5585
/*
 * Decide whether the main link needs to be retrained. Requires a
 * previously trained link, PSR not in control of the main link, valid
 * cached link params, and a link status showing channel EQ/CR failure.
 */
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link_trained)
		return false;

	/*
	 * While PSR source HW is enabled, it will control main-link sending
	 * frames, enabling and disabling it so trying to do a retrain will fail
	 * as the link would or not be on or it could mix training patterns
	 * and frame data at the same time causing retrain to fail.
	 * Also when exiting PSR, HW will retrain the link anyways fixing
	 * any link status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (!intel_dp_get_link_status(intel_dp, link_status))
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	/* Retrain if Channel EQ or CR not ok */
	return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}
5619
5620 static bool intel_dp_has_connector(struct intel_dp *intel_dp,
5621                                    const struct drm_connector_state *conn_state)
5622 {
5623         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5624         struct intel_encoder *encoder;
5625         enum pipe pipe;
5626
5627         if (!conn_state->best_encoder)
5628                 return false;
5629
5630         /* SST */
5631         encoder = &dp_to_dig_port(intel_dp)->base;
5632         if (conn_state->best_encoder == &encoder->base)
5633                 return true;
5634
5635         /* MST */
5636         for_each_pipe(i915, pipe) {
5637                 encoder = &intel_dp->mst_encoders[pipe]->base;
5638                 if (conn_state->best_encoder == &encoder->base)
5639                         return true;
5640         }
5641
5642         return false;
5643 }
5644
/*
 * Prepare for a link retrain: collect (and lock) the active CRTCs
 * currently driven by this DP port into *crtc_mask. Returns 0 on
 * success or a modeset-lock error (e.g. -EDEADLK) the caller must
 * handle; *crtc_mask is left at 0 when no retrain is needed.
 */
static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
				      struct drm_modeset_acquire_ctx *ctx,
				      u32 *crtc_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*crtc_mask = 0;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		/* Skip if a nonblocking commit hasn't finished HW programming yet */
		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*crtc_mask |= drm_crtc_mask(&crtc->base);
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Re-check: the link may have recovered while we were iterating */
	if (!intel_dp_needs_link_retrain(intel_dp))
		*crtc_mask = 0;

	return ret;
}
5697
5698 static bool intel_dp_is_connected(struct intel_dp *intel_dp)
5699 {
5700         struct intel_connector *connector = intel_dp->attached_connector;
5701
5702         return connector->base.status == connector_status_connected ||
5703                 intel_dp->is_mst;
5704 }
5705
/*
 * Retrain the DP link on every active CRTC currently driven by
 * @encoder. FIFO underrun reporting on the affected pipes is suppressed
 * around the retrain and only re-enabled one vblank after, since
 * retraining disturbs the link. Returns 0 or a modeset-lock error such
 * as -EDEADLK that the caller must back off and retry.
 */
int intel_dp_retrain_link(struct intel_encoder *encoder,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u32 crtc_mask;
	int ret;

	if (!intel_dp_is_connected(intel_dp))
		return 0;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask);
	if (ret)
		return ret;

	/* No retrain needed, or no active CRTC uses this link */
	if (crtc_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Suppress underruns caused by re-training */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), false);
	}

	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Keep underrun reporting disabled until things are stable */
		intel_wait_for_vblank(dev_priv, crtc->pipe);

		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), true);
	}

	return 0;
}
5762
5763 /*
5764  * If display is now connected check links status,
5765  * there has been known issues of link loss triggering
5766  * long pulse.
5767  *
5768  * Some sinks (eg. ASUS PB287Q) seem to perform some
5769  * weird HPD ping pong during modesets. So we can apparently
5770  * end up with HPD going low during a modeset, and then
5771  * going back up soon after. And once that happens we must
5772  * retrain the link to get a picture. That's in case no
5773  * userspace component reacted to intermittent HPD dip.
5774  */
/*
 * DP hotplug handler: run the generic hotplug processing, then retrain
 * the link if needed (see the comment above on HPD ping-pong).
 */
static enum intel_hotplug_state
intel_dp_hotplug(struct intel_encoder *encoder,
		 struct intel_connector *connector)
{
	struct drm_modeset_acquire_ctx ctx;
	enum intel_hotplug_state state;
	int ret;

	state = intel_encoder_hotplug(encoder, connector);

	drm_modeset_acquire_init(&ctx, 0);

	/* Retry the retrain on modeset-lock deadlock, backing off each time */
	for (;;) {
		ret = intel_dp_retrain_link(encoder, &ctx);

		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			continue;
		}

		break;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_WARN(encoder->base.dev, ret,
		 "Acquiring modeset locks failed with %i\n", ret);

	/*
	 * Keeping it consistent with intel_ddi_hotplug() and
	 * intel_hdmi_hotplug().
	 */
	if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
		state = INTEL_HOTPLUG_RETRY;

	return state;
}
5812
/*
 * Read and ack DP_DEVICE_SERVICE_IRQ_VECTOR (DPCD 1.1+ only) and
 * dispatch automated-test, content-protection and sink-specific IRQs.
 */
static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	/* Ack the pending IRQs by writing the vector back */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}
5836
5837 /*
5838  * According to DP spec
5839  * 5.1.2:
5840  *  1. Read DPCD
5841  *  2. Configure link according to Receiver Capabilities
5842  *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
5843  *  4. Check link status on receipt of hot-plug interrupt
5844  *
5845  * intel_dp_short_pulse -  handles short pulse interrupts
5846  * when full detection is not required.
5847  * Returns %true if short pulse is handled and full detection
5848  * is NOT required and %false otherwise.
5849  */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	/* May run a compliance test and set compliance.test_type below */
	intel_dp_check_service_irq(intel_dp);

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	/* defer to the hotplug work for link retraining if needed */
	if (intel_dp_needs_link_retrain(intel_dp))
		return false;

	intel_psr_short_pulse(intel_dp);

	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		drm_dbg_kms(&dev_priv->drm,
			    "Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
	}

	return true;
}
5896
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine the connector status from DPCD for an external DP sink,
 * including branch (downstream-port) devices. Falls back to gently
 * probing DDC when the branch device offers no HPD/sink-count signal.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	/* eDP is handled by edp_detect(), never here */
	if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
		return connector_status_connected;

	if (lspcon->active)
		lspcon_resume(lspcon);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp_has_sink_count(intel_dp) &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		return intel_dp->sink_count ?
		connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD < 1.1: only the coarse downstream-port type is available */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
5951
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	/* eDP panels are treated as permanently connected */
	return connector_status_connected;
}
5957
5958 static bool ibx_digital_port_connected(struct intel_encoder *encoder)
5959 {
5960         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5961         u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];
5962
5963         return intel_de_read(dev_priv, SDEISR) & bit;
5964 }
5965
5966 static bool g4x_digital_port_connected(struct intel_encoder *encoder)
5967 {
5968         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5969         u32 bit;
5970
5971         switch (encoder->hpd_pin) {
5972         case HPD_PORT_B:
5973                 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
5974                 break;
5975         case HPD_PORT_C:
5976                 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
5977                 break;
5978         case HPD_PORT_D:
5979                 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
5980                 break;
5981         default:
5982                 MISSING_CASE(encoder->hpd_pin);
5983                 return false;
5984         }
5985
5986         return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
5987 }
5988
5989 static bool gm45_digital_port_connected(struct intel_encoder *encoder)
5990 {
5991         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5992         u32 bit;
5993
5994         switch (encoder->hpd_pin) {
5995         case HPD_PORT_B:
5996                 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
5997                 break;
5998         case HPD_PORT_C:
5999                 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
6000                 break;
6001         case HPD_PORT_D:
6002                 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
6003                 break;
6004         default:
6005                 MISSING_CASE(encoder->hpd_pin);
6006                 return false;
6007         }
6008
6009         return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
6010 }
6011
6012 static bool ilk_digital_port_connected(struct intel_encoder *encoder)
6013 {
6014         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
6015         u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
6016
6017         return intel_de_read(dev_priv, DEISR) & bit;
6018 }
6019
6020 /*
6021  * intel_digital_port_connected - is the specified port connected?
6022  * @encoder: intel_encoder
6023  *
6024  * In cases where there's a connector physically connected but it can't be used
6025  * by our hardware we also return false, since the rest of the driver should
6026  * pretty much treat the port as disconnected. This is relevant for type-C
6027  * (starting on ICL) where there's ownership involved.
6028  *
6029  * Return %true if port is connected, %false otherwise.
6030  */
6031 bool intel_digital_port_connected(struct intel_encoder *encoder)
6032 {
6033         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
6034         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
6035         bool is_connected = false;
6036         intel_wakeref_t wakeref;
6037
6038         with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
6039                 is_connected = dig_port->connected(encoder);
6040
6041         return is_connected;
6042 }
6043
6044 static struct edid *
6045 intel_dp_get_edid(struct intel_dp *intel_dp)
6046 {
6047         struct intel_connector *intel_connector = intel_dp->attached_connector;
6048
6049         /* use cached edid if we have one */
6050         if (intel_connector->edid) {
6051                 /* invalid edid */
6052                 if (IS_ERR(intel_connector->edid))
6053                         return NULL;
6054
6055                 return drm_edid_duplicate(intel_connector->edid);
6056         } else
6057                 return drm_get_edid(&intel_connector->base,
6058                                     &intel_dp->aux.ddc);
6059 }
6060
/*
 * (Re)read the sink's EDID and refresh all EDID-derived state: DFP max
 * bpc, HDMI sink/audio detection, CEC EDID and EDID quirks. The
 * previous EDID state is dropped first via intel_dp_unset_edid().
 */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct edid *edid;

	intel_dp_unset_edid(intel_dp);
	edid = intel_dp_get_edid(intel_dp);
	connector->detect_edid = edid;

	/* Cache the downstream facing port's max bpc limit */
	intel_dp->dfp.max_bpc =
		drm_dp_downstream_max_bpc(intel_dp->dpcd,
					  intel_dp->downstream_ports);

	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] DFP max bpc %d\n",
		    connector->base.base.id, connector->base.name,
		    intel_dp->dfp.max_bpc);

	/* HDMI/audio detection only applies to digital sinks per EDID */
	if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
		intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
		intel_dp->has_audio = drm_detect_monitor_audio(edid);
	}

	drm_dp_cec_set_edid(&intel_dp->aux, edid);
	intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
}
6088
6089 static void
6090 intel_dp_unset_edid(struct intel_dp *intel_dp)
6091 {
6092         struct intel_connector *intel_connector = intel_dp->attached_connector;
6093
6094         drm_dp_cec_unset_edid(&intel_dp->aux);
6095         kfree(intel_connector->detect_edid);
6096         intel_connector->detect_edid = NULL;
6097
6098         intel_dp->has_hdmi_sink = false;
6099         intel_dp->has_audio = false;
6100         intel_dp->edid_quirks = 0;
6101
6102         intel_dp->dfp.max_bpc = 0;
6103 }
6104
/*
 * Connector ->detect_ctx() hook: determine whether a sink is present and,
 * if so, refresh all sink-derived state (DSC caps, MST, link params, EDID).
 * Returns a connector_status_* value or a negative error code (from link
 * retraining). The @force argument of the hook is not used here.
 */
static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	/* The probe helpers must hold connection_mutex when calling us. */
	drm_WARN_ON(&dev_priv->drm,
		    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	if (!INTEL_DISPLAY_ENABLED(dev_priv))
		return connector_status_disconnected;

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	/* Sink gone: forget cached compliance/DSC state, tear down MST. */
	if (status == connector_status_disconnected) {
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
		memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

		if (intel_dp->is_mst) {
			drm_dbg_kms(&dev_priv->drm,
				    "MST device may have disappeared %d vs %d\n",
				    intel_dp->is_mst,
				    intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
	if (INTEL_GEN(dev_priv) >= 11)
		intel_dp_get_dsc_sink_cap(intel_dp);

	intel_dp_configure_mst(intel_dp);

	/*
	 * TODO: Reset link params when switching to MST mode, until MST
	 * supports link training fallback params.
	 */
	if (intel_dp->reset_link_params || intel_dp->is_mst) {
		/* Initial max link lane count */
		intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);

		/* Initial max link rate */
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);

		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Some external monitors do not signal loss of link synchronization
	 * with an IRQ_HPD, so force a link status check.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		int ret;

		ret = intel_dp_retrain_link(encoder, ctx);
		if (ret)
			return ret;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	/* eDP counts as connected even when the EDID read yields nothing. */
	if (intel_dp_is_edp(intel_dp) ||
	    to_intel_connector(connector)->detect_edid)
		status = connector_status_connected;

	intel_dp_check_service_irq(intel_dp);

out:
	/* Keep the cached EDID only for connected (or MST) sinks. */
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	/*
	 * Make sure the refs for power wells enabled during detect are
	 * dropped to avoid a new detect cycle triggered by HPD polling.
	 */
	intel_display_power_flush_work(dev_priv);

	if (!intel_dp_is_edp(intel_dp))
		drm_dp_set_subconnector_property(connector,
						 status,
						 intel_dp->dpcd,
						 intel_dp->downstream_ports);
	return status;
}
6225
6226 static void
6227 intel_dp_force(struct drm_connector *connector)
6228 {
6229         struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
6230         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
6231         struct intel_encoder *intel_encoder = &dig_port->base;
6232         struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
6233         enum intel_display_power_domain aux_domain =
6234                 intel_aux_power_domain(dig_port);
6235         intel_wakeref_t wakeref;
6236
6237         drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
6238                     connector->base.id, connector->name);
6239         intel_dp_unset_edid(intel_dp);
6240
6241         if (connector->status != connector_status_connected)
6242                 return;
6243
6244         wakeref = intel_display_power_get(dev_priv, aux_domain);
6245
6246         intel_dp_set_edid(intel_dp);
6247
6248         intel_display_power_put(dev_priv, aux_domain, wakeref);
6249 }
6250
6251 static int intel_dp_get_modes(struct drm_connector *connector)
6252 {
6253         struct intel_connector *intel_connector = to_intel_connector(connector);
6254         struct edid *edid;
6255
6256         edid = intel_connector->detect_edid;
6257         if (edid) {
6258                 int ret = intel_connector_update_modes(connector, edid);
6259                 if (ret)
6260                         return ret;
6261         }
6262
6263         /* if eDP has no EDID, fall back to fixed mode */
6264         if (intel_dp_is_edp(intel_attached_dp(to_intel_connector(connector))) &&
6265             intel_connector->panel.fixed_mode) {
6266                 struct drm_display_mode *mode;
6267
6268                 mode = drm_mode_duplicate(connector->dev,
6269                                           intel_connector->panel.fixed_mode);
6270                 if (mode) {
6271                         drm_mode_probed_add(connector, mode);
6272                         return 1;
6273                 }
6274         }
6275
6276         return 0;
6277 }
6278
6279 static int
6280 intel_dp_connector_register(struct drm_connector *connector)
6281 {
6282         struct drm_i915_private *i915 = to_i915(connector->dev);
6283         struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
6284         int ret;
6285
6286         ret = intel_connector_register(connector);
6287         if (ret)
6288                 return ret;
6289
6290         drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
6291                     intel_dp->aux.name, connector->kdev->kobj.name);
6292
6293         intel_dp->aux.dev = connector->kdev;
6294         ret = drm_dp_aux_register(&intel_dp->aux);
6295         if (!ret)
6296                 drm_dp_cec_register_connector(&intel_dp->aux, connector);
6297         return ret;
6298 }
6299
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));

	/* Tear down in the reverse order of intel_dp_connector_register(). */
	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}
6309
/*
 * Flush/cancel all asynchronous work that may still reference the encoder
 * before it goes away: MST topology, the delayed eDP VDD-off work and the
 * reboot notifier, then finalize the AUX channel.
 */
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
	struct intel_dp *intel_dp = &dig_port->dp;

	intel_dp_mst_encoder_cleanup(dig_port);
	if (intel_dp_is_edp(intel_dp)) {
		intel_wakeref_t wakeref;

		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		with_pps_lock(intel_dp, wakeref)
			edp_panel_vdd_off_sync(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}

	intel_dp_aux_fini(intel_dp);
}
6335
static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	/* Quiesce outstanding work first; it may still use the encoder. */
	intel_dp_encoder_flush_work(encoder);

	drm_encoder_cleanup(encoder);
	/* Free the digital port allocation that contains this encoder. */
	kfree(enc_to_dig_port(to_intel_encoder(encoder)));
}
6343
/* Suspend hook: make sure eDP panel VDD is really off before suspending. */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
	intel_wakeref_t wakeref;

	/* Only eDP has panel VDD state to quiesce. */
	if (!intel_dp_is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);
}
6360
/*
 * If the BIOS left panel VDD enabled, take over tracking of that state:
 * grab the matching AUX power domain reference and schedule the normal
 * delayed VDD off. Caller must hold the PPS mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "VDD left on by BIOS, adjusting state tracking\n");
	intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));

	edp_panel_vdd_schedule_off(intel_dp);
}
6383
6384 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
6385 {
6386         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6387         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
6388         enum pipe pipe;
6389
6390         if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
6391                                   encoder->port, &pipe))
6392                 return pipe;
6393
6394         return INVALID_PIPE;
6395 }
6396
/*
 * Encoder ->reset() hook: re-sync software state with the hardware after
 * boot/resume (port register, LSPCON, PPS and, on VLV/CHV, the active pipe).
 */
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
	intel_wakeref_t wakeref;

	/* On pre-DDI platforms re-read the port register from the hw. */
	if (!HAS_DDI(dev_priv))
		intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);

	if (lspcon->active)
		lspcon_resume(lspcon);

	/* Have the next detect cycle re-derive the max link rate/lane count. */
	intel_dp->reset_link_params = true;

	/* The PPS/pipe state below only matters on VLV/CHV and for eDP. */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			intel_dp->active_pipe = vlv_active_pipe(intel_dp);

		if (intel_dp_is_edp(intel_dp)) {
			/*
			 * Reinit the power sequencer, in case BIOS did
			 * something nasty with it.
			 */
			intel_dp_pps_init(intel_dp);
			intel_edp_panel_vdd_sanitize(intel_dp);
		}
	}
}
6430
/*
 * Pull every connector belonging to @tile_group_id into the atomic state
 * and force a modeset (plus affected planes) on each of their CRTCs, so
 * all tiles of a tiled display are reprogrammed together.
 * Returns 0 or a negative error code.
 */
static int intel_modeset_tile_group(struct intel_atomic_state *state,
				    int tile_group_id)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	int ret = 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!connector->has_tile ||
		    connector->tile_group->id != tile_group_id)
			continue;

		conn_state = drm_atomic_get_connector_state(&state->base,
							    connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			break;
		}

		crtc = to_intel_crtc(conn_state->crtc);

		/* Connectors without an active CRTC need no modeset. */
		if (!crtc)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
6472
/*
 * Force a modeset on every enabled CRTC whose transcoder is in the
 * @transcoders bitmask, adding the affected connectors and planes to the
 * atomic state. Returns 0 or a negative error code.
 */
static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	/* Nothing requested, nothing to do. */
	if (transcoders == 0)
		return 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.enable)
			continue;

		if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			return ret;

		/* Mark this transcoder as handled. */
		transcoders &= ~BIT(crtc_state->cpu_transcoder);
	}

	/* Every requested transcoder should map to an enabled CRTC. */
	drm_WARN_ON(&dev_priv->drm, transcoders != 0);

	return 0;
}
6512
/*
 * For a connector on a port-synced CRTC, collect the master transcoder and
 * all slave transcoders from the old CRTC state and force modesets on the
 * corresponding CRTCs, so the whole synced group is retrained together.
 */
static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
				      struct drm_connector *connector)
{
	const struct drm_connector_state *old_conn_state =
		drm_atomic_get_old_connector_state(&state->base, connector);
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	u8 transcoders;

	crtc = to_intel_crtc(old_conn_state->crtc);
	/* Connector wasn't on a CRTC; nothing to sync. */
	if (!crtc)
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return 0;

	/* Slaves, plus the master (if this CRTC isn't the master itself). */
	transcoders = old_crtc_state->sync_mode_slaves_mask;
	if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
		transcoders |= BIT(old_crtc_state->master_transcoder);

	return intel_modeset_affected_transcoders(state,
						  transcoders);
}
6538
/*
 * Connector ->atomic_check(): on top of the generic digital-connector
 * checks, ensure that a modeset on a tiled or port-synced output also
 * forces a modeset on all other CRTCs of the same group.
 */
static int intel_dp_connector_atomic_check(struct drm_connector *conn,
					   struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(conn->dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	int ret;

	ret = intel_digital_connector_atomic_check(conn, &state->base);
	if (ret)
		return ret;

	/*
	 * We don't enable port sync on BDW due to missing w/as and
	 * due to not having adjusted the modeset sequence appropriately.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 0;

	if (!intel_connector_needs_modeset(state, conn))
		return 0;

	/* Modeset all tiles of a tiled display together. */
	if (conn->has_tile) {
		ret = intel_modeset_tile_group(state, conn->tile_group->id);
		if (ret)
			return ret;
	}

	return intel_modeset_synced_crtcs(state, conn);
}
6568
/*
 * Connector funcs. Note there is no .detect hook: probing is done via the
 * .detect_ctx helper hook in intel_dp_connector_helper_funcs.
 */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
6580
/* Probe/modeset helper hooks for DP connectors. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_dp_connector_atomic_check,
};
6587
/* Encoder funcs: .reset re-syncs sw state with hw after boot/resume. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
6592
6593 static bool intel_edp_have_power(struct intel_dp *intel_dp)
6594 {
6595         intel_wakeref_t wakeref;
6596         bool have_power = false;
6597
6598         with_pps_lock(intel_dp, wakeref) {
6599                 have_power = edp_have_panel_power(intel_dp) &&
6600                                                   edp_have_panel_vdd(intel_dp);
6601         }
6602
6603         return have_power;
6604 }
6605
/*
 * Handle a long/short HPD pulse on a (e)DP port. Returns IRQ_HANDLED when
 * the pulse was fully dealt with here, IRQ_NONE when the caller should do
 * further processing (presumably a full re-detect — see the caller).
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_dp *intel_dp = &dig_port->dp;

	if (dig_port->base.type == INTEL_OUTPUT_EDP &&
	    (long_hpd || !intel_edp_have_power(intel_dp))) {
		/*
		 * vdd off can generate a long/short pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		drm_dbg_kms(&i915->drm,
			    "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
			    long_hpd ? "long" : "short",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return IRQ_HANDLED;
	}

	drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    long_hpd ? "long" : "short");

	/* A long pulse means (dis)connect: punt to full detection. */
	if (long_hpd) {
		intel_dp->reset_link_params = true;
		return IRQ_NONE;
	}

	/* Short pulse: service the sink IRQ (MST or SST) in place. */
	if (intel_dp->is_mst) {
		if (!intel_dp_check_mst_status(intel_dp))
			return IRQ_NONE;
	} else if (!intel_dp_short_pulse(intel_dp)) {
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
6647
6648 /* check the VBT to see whether the eDP is on another port */
6649 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
6650 {
6651         /*
6652          * eDP not supported on g4x. so bail out early just
6653          * for a bit extra safety in case the VBT is bonkers.
6654          */
6655         if (INTEL_GEN(dev_priv) < 5)
6656                 return false;
6657
6658         if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
6659                 return true;
6660
6661         return intel_bios_is_port_edp(dev_priv, port);
6662 }
6663
/*
 * Attach the DP-relevant KMS properties to a newly created connector.
 * Which properties apply depends on the platform and on DP vs eDP.
 */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	/* The subconnector property only makes sense for external DP. */
	if (!intel_dp_is_edp(intel_dp))
		drm_connector_attach_dp_subconnector_property(connector);

	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	intel_attach_broadcast_rgb_property(connector);
	/* max bpc range: 6-10 on GMCH platforms, 6-12 on gen5+. */
	if (HAS_GMCH(dev_priv))
		drm_connector_attach_max_bpc_property(connector, 6, 10);
	else if (INTEL_GEN(dev_priv) >= 5)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	intel_attach_colorspace_property(connector);

	/* HDR output metadata is exposed on GLK and gen11+. */
	if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
		drm_object_attach_property(&connector->base,
					   connector->dev->mode_config.hdr_output_metadata_property,
					   0);

	/* Panel scaling modes apply only to the fixed-mode eDP panel. */
	if (intel_dp_is_edp(intel_dp)) {
		u32 allowed_scalers;

		allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
		if (!HAS_GMCH(dev_priv))
			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);

		drm_connector_attach_scaling_mode_property(connector, allowed_scalers);

		connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;

	}
}
6702
6703 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
6704 {
6705         intel_dp->panel_power_off_time = ktime_get_boottime();
6706         intel_dp->last_power_on = jiffies;
6707         intel_dp->last_backlight_off = jiffies;
6708 }
6709
/*
 * Read the current panel power sequencer delays from the PPS registers
 * into @seq. All values end up in units of 100 usec; t11_t12 is converted
 * from the hardware's 100 msec granularity.
 */
static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, pp_ctl;
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	pp_ctl = ilk_get_pp_control(intel_dp);

	/* Ensure PPS is unlocked */
	if (!HAS_DDI(dev_priv))
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);

	pp_on = intel_de_read(dev_priv, regs.pp_on);
	pp_off = intel_de_read(dev_priv, regs.pp_off);

	/* Pull timing values out of registers */
	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);

	if (i915_mmio_reg_valid(regs.pp_div)) {
		u32 pp_div;

		pp_div = intel_de_read(dev_priv, regs.pp_div);

		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
	} else {
		/* No PP_DIV register: the power cycle delay lives in PP_CONTROL. */
		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
	}
}
6744
/* Log one set of panel power sequencer delays, tagged with @state_name. */
static void
intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
{
	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      state_name,
		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}
6752
6753 static void
6754 intel_pps_verify_state(struct intel_dp *intel_dp)
6755 {
6756         struct edp_power_seq hw;
6757         struct edp_power_seq *sw = &intel_dp->pps_delays;
6758
6759         intel_pps_readout_hw_state(intel_dp, &hw);
6760
6761         if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
6762             hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
6763                 DRM_ERROR("PPS state mismatch\n");
6764                 intel_pps_dump_state("sw", sw);
6765                 intel_pps_dump_state("hw", &hw);
6766         }
6767 }
6768
/*
 * Compute the panel power sequencer delays as the max of the current hw
 * programming and the VBT, falling back to the eDP spec limits when both
 * are unset, and cache them in intel_dp->pps_delays. Runs only once (bails
 * if already initialized). Caller must hold the PPS mutex.
 */
static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	intel_pps_readout_hw_state(intel_dp, &cur);

	intel_pps_dump_state("cur", &cur);

	vbt = dev_priv->vbt.edp.pps;
	/* On Toshiba Satellite P50-C-18C system the VBT T12 delay
	 * of 500ms appears to be too short. Occasionally the panel
	 * just fails to power back on. The quirk enforces a minimum
	 * of 1300ms (in the 100usec units used here), which is
	 * sufficient to avoid this problem.
	 */
	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
		drm_dbg_kms(&dev_priv->drm,
			    "Increasing T12 panel delay as per the quirk to %d\n",
			    vbt.t11_t12);
	}
	/* T11_T12 delay is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	vbt.t11_t12 += 100 * 10;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state("vbt", &vbt);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert the 100usec-unit delays to milliseconds for the waiters. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	drm_dbg_kms(&dev_priv->drm,
		    "panel power up delay %d, power down delay %d, power cycle delay %d\n",
		    intel_dp->panel_power_up_delay,
		    intel_dp->panel_power_down_delay,
		    intel_dp->panel_power_cycle_delay);

	drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
		    intel_dp->backlight_on_delay,
		    intel_dp->backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;

	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}
6864
/*
 * Program the panel power sequencer (PPS) hardware registers from the
 * delays cached in intel_dp->pps_delays. Caller must hold pps_mutex.
 *
 * @force_disable_vdd: clear EDP_FORCE_VDD first, to undo any VDD force
 * the BIOS may have left behind on this power sequencer (see the long
 * comment below).
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
                                              bool force_disable_vdd)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        u32 pp_on, pp_off, port_sel = 0;
        /* PP clock divisor is derived from the raw clock frequency (kHz) */
        int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
        struct pps_registers regs;
        enum port port = dp_to_dig_port(intel_dp)->base.port;
        const struct edp_power_seq *seq = &intel_dp->pps_delays;

        lockdep_assert_held(&dev_priv->pps_mutex);

        intel_pps_get_registers(intel_dp, &regs);

        /*
         * On some VLV machines the BIOS can leave the VDD
         * enabled even on power sequencers which aren't
         * hooked up to any port. This would mess up the
         * power domain tracking the first time we pick
         * one of these power sequencers for use since
         * edp_panel_vdd_on() would notice that the VDD was
         * already on and therefore wouldn't grab the power
         * domain reference. Disable VDD first to avoid this.
         * This also avoids spuriously turning the VDD on as
         * soon as the new power sequencer gets initialized.
         */
        if (force_disable_vdd) {
                u32 pp = ilk_get_pp_control(intel_dp);

                drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
                         "Panel power already on\n");

                if (pp & EDP_FORCE_VDD)
                        drm_dbg_kms(&dev_priv->drm,
                                    "VDD already on, disabling first\n");

                pp &= ~EDP_FORCE_VDD;

                intel_de_write(dev_priv, regs.pp_ctrl, pp);
        }

        /* t1_t3: power up, t8: backlight on, t9: backlight off, t10: power down */
        pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
                REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
        pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
                REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);

        /* Haswell doesn't have any port selection bits for the panel
         * power sequencer any more. */
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                port_sel = PANEL_PORT_SELECT_VLV(port);
        } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
                switch (port) {
                case PORT_A:
                        port_sel = PANEL_PORT_SELECT_DPA;
                        break;
                case PORT_C:
                        port_sel = PANEL_PORT_SELECT_DPC;
                        break;
                case PORT_D:
                        port_sel = PANEL_PORT_SELECT_DPD;
                        break;
                default:
                        MISSING_CASE(port);
                        break;
                }
        }

        pp_on |= port_sel;

        intel_de_write(dev_priv, regs.pp_on, pp_on);
        intel_de_write(dev_priv, regs.pp_off, pp_off);

        /*
         * Compute the divisor for the pp clock, simply match the Bspec formula.
         * Platforms without a dedicated PP_DIV register instead keep the
         * power cycle delay in the PP_CONTROL register
         * (BXT_POWER_CYCLE_DELAY_MASK) - see the else branch.
         */
        if (i915_mmio_reg_valid(regs.pp_div)) {
                intel_de_write(dev_priv, regs.pp_div,
                               REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
        } else {
                u32 pp_ctl;

                pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
                pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
                pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
                intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
        }

        drm_dbg_kms(&dev_priv->drm,
                    "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
                    intel_de_read(dev_priv, regs.pp_on),
                    intel_de_read(dev_priv, regs.pp_off),
                    i915_mmio_reg_valid(regs.pp_div) ?
                    intel_de_read(dev_priv, regs.pp_div) :
                    (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
6961
6962 static void intel_dp_pps_init(struct intel_dp *intel_dp)
6963 {
6964         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
6965
6966         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
6967                 vlv_initial_power_sequencer_setup(intel_dp);
6968         } else {
6969                 intel_dp_init_panel_power_sequencer(intel_dp);
6970                 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
6971         }
6972 }
6973
6974 /**
6975  * intel_dp_set_drrs_state - program registers for RR switch to take effect
6976  * @dev_priv: i915 device
6977  * @crtc_state: a pointer to the active intel_crtc_state
6978  * @refresh_rate: RR to be programmed
6979  *
6980  * This function gets called when refresh rate (RR) has to be changed from
6981  * one frequency to another. Switches can be between high and low RR
6982  * supported by the panel or to any other RR based on media playback (in
6983  * this case, RR value needs to be passed from user space).
6984  *
6985  * The caller of this function needs to take a lock on dev_priv->drrs.
6986  */
6987 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
6988                                     const struct intel_crtc_state *crtc_state,
6989                                     int refresh_rate)
6990 {
6991         struct intel_dp *intel_dp = dev_priv->drrs.dp;
6992         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
6993         enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
6994
6995         if (refresh_rate <= 0) {
6996                 drm_dbg_kms(&dev_priv->drm,
6997                             "Refresh rate should be positive non-zero.\n");
6998                 return;
6999         }
7000
7001         if (intel_dp == NULL) {
7002                 drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
7003                 return;
7004         }
7005
7006         if (!intel_crtc) {
7007                 drm_dbg_kms(&dev_priv->drm,
7008                             "DRRS: intel_crtc not initialized\n");
7009                 return;
7010         }
7011
7012         if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
7013                 drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
7014                 return;
7015         }
7016
7017         if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) ==
7018                         refresh_rate)
7019                 index = DRRS_LOW_RR;
7020
7021         if (index == dev_priv->drrs.refresh_rate_type) {
7022                 drm_dbg_kms(&dev_priv->drm,
7023                             "DRRS requested for previously set RR...ignoring\n");
7024                 return;
7025         }
7026
7027         if (!crtc_state->hw.active) {
7028                 drm_dbg_kms(&dev_priv->drm,
7029                             "eDP encoder disabled. CRTC not Active\n");
7030                 return;
7031         }
7032
7033         if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
7034                 switch (index) {
7035                 case DRRS_HIGH_RR:
7036                         intel_dp_set_m_n(crtc_state, M1_N1);
7037                         break;
7038                 case DRRS_LOW_RR:
7039                         intel_dp_set_m_n(crtc_state, M2_N2);
7040                         break;
7041                 case DRRS_MAX_RR:
7042                 default:
7043                         drm_err(&dev_priv->drm,
7044                                 "Unsupported refreshrate type\n");
7045                 }
7046         } else if (INTEL_GEN(dev_priv) > 6) {
7047                 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
7048                 u32 val;
7049
7050                 val = intel_de_read(dev_priv, reg);
7051                 if (index > DRRS_HIGH_RR) {
7052                         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
7053                                 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
7054                         else
7055                                 val |= PIPECONF_EDP_RR_MODE_SWITCH;
7056                 } else {
7057                         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
7058                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
7059                         else
7060                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
7061                 }
7062                 intel_de_write(dev_priv, reg, val);
7063         }
7064
7065         dev_priv->drrs.refresh_rate_type = index;
7066
7067         drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
7068                     refresh_rate);
7069 }
7070
7071 static void
7072 intel_edp_drrs_enable_locked(struct intel_dp *intel_dp)
7073 {
7074         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7075
7076         dev_priv->drrs.busy_frontbuffer_bits = 0;
7077         dev_priv->drrs.dp = intel_dp;
7078 }
7079
7080 /**
7081  * intel_edp_drrs_enable - init drrs struct if supported
7082  * @intel_dp: DP struct
7083  * @crtc_state: A pointer to the active crtc state.
7084  *
7085  * Initializes frontbuffer_bits and drrs.dp
7086  */
7087 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
7088                            const struct intel_crtc_state *crtc_state)
7089 {
7090         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7091
7092         if (!crtc_state->has_drrs)
7093                 return;
7094
7095         drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n");
7096
7097         mutex_lock(&dev_priv->drrs.mutex);
7098
7099         if (dev_priv->drrs.dp) {
7100                 drm_warn(&dev_priv->drm, "DRRS already enabled\n");
7101                 goto unlock;
7102         }
7103
7104         intel_edp_drrs_enable_locked(intel_dp);
7105
7106 unlock:
7107         mutex_unlock(&dev_priv->drrs.mutex);
7108 }
7109
7110 static void
7111 intel_edp_drrs_disable_locked(struct intel_dp *intel_dp,
7112                               const struct intel_crtc_state *crtc_state)
7113 {
7114         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7115
7116         if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
7117                 int refresh;
7118
7119                 refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode);
7120                 intel_dp_set_drrs_state(dev_priv, crtc_state, refresh);
7121         }
7122
7123         dev_priv->drrs.dp = NULL;
7124 }
7125
7126 /**
7127  * intel_edp_drrs_disable - Disable DRRS
7128  * @intel_dp: DP struct
7129  * @old_crtc_state: Pointer to old crtc_state.
7130  *
7131  */
7132 void intel_edp_drrs_disable(struct intel_dp *intel_dp,
7133                             const struct intel_crtc_state *old_crtc_state)
7134 {
7135         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7136
7137         if (!old_crtc_state->has_drrs)
7138                 return;
7139
7140         mutex_lock(&dev_priv->drrs.mutex);
7141         if (!dev_priv->drrs.dp) {
7142                 mutex_unlock(&dev_priv->drrs.mutex);
7143                 return;
7144         }
7145
7146         intel_edp_drrs_disable_locked(intel_dp, old_crtc_state);
7147         mutex_unlock(&dev_priv->drrs.mutex);
7148
7149         cancel_delayed_work_sync(&dev_priv->drrs.work);
7150 }
7151
7152 /**
7153  * intel_edp_drrs_update - Update DRRS state
7154  * @intel_dp: Intel DP
7155  * @crtc_state: new CRTC state
7156  *
7157  * This function will update DRRS states, disabling or enabling DRRS when
7158  * executing fastsets. For full modeset, intel_edp_drrs_disable() and
7159  * intel_edp_drrs_enable() should be called instead.
7160  */
7161 void
7162 intel_edp_drrs_update(struct intel_dp *intel_dp,
7163                       const struct intel_crtc_state *crtc_state)
7164 {
7165         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7166
7167         if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
7168                 return;
7169
7170         mutex_lock(&dev_priv->drrs.mutex);
7171
7172         /* New state matches current one? */
7173         if (crtc_state->has_drrs == !!dev_priv->drrs.dp)
7174                 goto unlock;
7175
7176         if (crtc_state->has_drrs)
7177                 intel_edp_drrs_enable_locked(intel_dp);
7178         else
7179                 intel_edp_drrs_disable_locked(intel_dp, crtc_state);
7180
7181 unlock:
7182         mutex_unlock(&dev_priv->drrs.mutex);
7183 }
7184
7185 static void intel_edp_drrs_downclock_work(struct work_struct *work)
7186 {
7187         struct drm_i915_private *dev_priv =
7188                 container_of(work, typeof(*dev_priv), drrs.work.work);
7189         struct intel_dp *intel_dp;
7190
7191         mutex_lock(&dev_priv->drrs.mutex);
7192
7193         intel_dp = dev_priv->drrs.dp;
7194
7195         if (!intel_dp)
7196                 goto unlock;
7197
7198         /*
7199          * The delayed work can race with an invalidate hence we need to
7200          * recheck.
7201          */
7202
7203         if (dev_priv->drrs.busy_frontbuffer_bits)
7204                 goto unlock;
7205
7206         if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
7207                 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
7208
7209                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
7210                         drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode));
7211         }
7212
7213 unlock:
7214         mutex_unlock(&dev_priv->drrs.mutex);
7215 }
7216
7217 /**
7218  * intel_edp_drrs_invalidate - Disable Idleness DRRS
7219  * @dev_priv: i915 device
7220  * @frontbuffer_bits: frontbuffer plane tracking bits
7221  *
7222  * This function gets called everytime rendering on the given planes start.
7223  * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
7224  *
7225  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
7226  */
7227 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
7228                                unsigned int frontbuffer_bits)
7229 {
7230         struct intel_dp *intel_dp;
7231         struct drm_crtc *crtc;
7232         enum pipe pipe;
7233
7234         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
7235                 return;
7236
7237         cancel_delayed_work(&dev_priv->drrs.work);
7238
7239         mutex_lock(&dev_priv->drrs.mutex);
7240
7241         intel_dp = dev_priv->drrs.dp;
7242         if (!intel_dp) {
7243                 mutex_unlock(&dev_priv->drrs.mutex);
7244                 return;
7245         }
7246
7247         crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
7248         pipe = to_intel_crtc(crtc)->pipe;
7249
7250         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
7251         dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
7252
7253         /* invalidate means busy screen hence upclock */
7254         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
7255                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
7256                                         drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
7257
7258         mutex_unlock(&dev_priv->drrs.mutex);
7259 }
7260
7261 /**
7262  * intel_edp_drrs_flush - Restart Idleness DRRS
7263  * @dev_priv: i915 device
7264  * @frontbuffer_bits: frontbuffer plane tracking bits
7265  *
7266  * This function gets called every time rendering on the given planes has
7267  * completed or flip on a crtc is completed. So DRRS should be upclocked
7268  * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
7269  * if no other planes are dirty.
7270  *
7271  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
7272  */
7273 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
7274                           unsigned int frontbuffer_bits)
7275 {
7276         struct intel_dp *intel_dp;
7277         struct drm_crtc *crtc;
7278         enum pipe pipe;
7279
7280         if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
7281                 return;
7282
7283         cancel_delayed_work(&dev_priv->drrs.work);
7284
7285         mutex_lock(&dev_priv->drrs.mutex);
7286
7287         intel_dp = dev_priv->drrs.dp;
7288         if (!intel_dp) {
7289                 mutex_unlock(&dev_priv->drrs.mutex);
7290                 return;
7291         }
7292
7293         crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
7294         pipe = to_intel_crtc(crtc)->pipe;
7295
7296         frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
7297         dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
7298
7299         /* flush means busy screen hence upclock */
7300         if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
7301                 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
7302                                         drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
7303
7304         /*
7305          * flush also means no more activity hence schedule downclock, if all
7306          * other fbs are quiescent too
7307          */
7308         if (!dev_priv->drrs.busy_frontbuffer_bits)
7309                 schedule_delayed_work(&dev_priv->drrs.work,
7310                                 msecs_to_jiffies(1000));
7311         mutex_unlock(&dev_priv->drrs.mutex);
7312 }
7313
7314 /**
7315  * DOC: Display Refresh Rate Switching (DRRS)
7316  *
7317  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
7319  * dynamically, based on the usage scenario. This feature is applicable
7320  * for internal panels.
7321  *
7322  * Indication that the panel supports DRRS is given by the panel EDID, which
7323  * would list multiple refresh rates for one resolution.
7324  *
7325  * DRRS is of 2 types - static and seamless.
7326  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
7327  * (may appear as a blink on screen) and is used in dock-undock scenario.
7328  * Seamless DRRS involves changing RR without any visual effect to the user
7329  * and can be used during normal system usage. This is done by programming
7330  * certain registers.
7331  *
7332  * Support for static/seamless DRRS may be indicated in the VBT based on
7333  * inputs from the panel spec.
7334  *
7335  * DRRS saves power by switching to low RR based on usage scenarios.
7336  *
7337  * The implementation is based on frontbuffer tracking implementation.  When
7338  * there is a disturbance on the screen triggered by user activity or a periodic
7339  * system activity, DRRS is disabled (RR is changed to high RR).  When there is
7340  * no movement on screen, after a timeout of 1 second, a switch to low RR is
7341  * made.
7342  *
7343  * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
7344  * and intel_edp_drrs_flush() are called.
7345  *
7346  * DRRS can be further extended to support other internal panels and also
7347  * the scenario of video playback wherein RR is set based on the rate
7348  * requested by userspace.
7349  */
7350
7351 /**
7352  * intel_dp_drrs_init - Init basic DRRS work and mutex.
7353  * @connector: eDP connector
7354  * @fixed_mode: preferred mode of panel
7355  *
7356  * This function is  called only once at driver load to initialize basic
7357  * DRRS stuff.
7358  *
7359  * Returns:
7360  * Downclock mode if panel supports it, else return NULL.
7361  * DRRS support is determined by the presence of downclock mode (apart
7362  * from VBT setting).
7363  */
7364 static struct drm_display_mode *
7365 intel_dp_drrs_init(struct intel_connector *connector,
7366                    struct drm_display_mode *fixed_mode)
7367 {
7368         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
7369         struct drm_display_mode *downclock_mode = NULL;
7370
7371         INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
7372         mutex_init(&dev_priv->drrs.mutex);
7373
7374         if (INTEL_GEN(dev_priv) <= 6) {
7375                 drm_dbg_kms(&dev_priv->drm,
7376                             "DRRS supported for Gen7 and above\n");
7377                 return NULL;
7378         }
7379
7380         if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
7381                 drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
7382                 return NULL;
7383         }
7384
7385         downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
7386         if (!downclock_mode) {
7387                 drm_dbg_kms(&dev_priv->drm,
7388                             "Downclock mode is not found. DRRS not supported\n");
7389                 return NULL;
7390         }
7391
7392         dev_priv->drrs.type = dev_priv->vbt.drrs_type;
7393
7394         dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
7395         drm_dbg_kms(&dev_priv->drm,
7396                     "seamless DRRS supported for eDP panel.\n");
7397         return downclock_mode;
7398 }
7399
/*
 * eDP-specific connector setup: power sequencer init, DPCD/EDID caching,
 * fixed/downclock mode lookup, and backlight registration.
 *
 * Returns true for non-eDP ports (nothing to do) and on success;
 * false on failure (LVDS conflict or a "ghost" panel with no DPCD).
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
                                     struct intel_connector *intel_connector)
{
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector *connector = &intel_connector->base;
        struct drm_display_mode *fixed_mode = NULL;
        struct drm_display_mode *downclock_mode = NULL;
        bool has_dpcd;
        enum pipe pipe = INVALID_PIPE;
        intel_wakeref_t wakeref;
        struct edid *edid;

        if (!intel_dp_is_edp(intel_dp))
                return true;

        INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);

        /*
         * On IBX/CPT we may get here with LVDS already registered. Since the
         * driver uses the only internal power sequencer available for both
         * eDP and LVDS bail out early in this case to prevent interfering
         * with an already powered-on LVDS power sequencer.
         */
        if (intel_get_lvds_encoder(dev_priv)) {
                drm_WARN_ON(dev,
                            !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
                drm_info(&dev_priv->drm,
                         "LVDS was detected, not registering eDP\n");

                return false;
        }

        /* Bring up the power sequencer state under the pps lock. */
        with_pps_lock(intel_dp, wakeref) {
                intel_dp_init_panel_power_timestamps(intel_dp);
                intel_dp_pps_init(intel_dp);
                intel_edp_panel_vdd_sanitize(intel_dp);
        }

        /* Cache DPCD and EDID for edp. */
        has_dpcd = intel_edp_init_dpcd(intel_dp);

        if (!has_dpcd) {
                /* if this fails, presume the device is a ghost */
                drm_info(&dev_priv->drm,
                         "failed to retrieve link info, disabling eDP\n");
                goto out_vdd_off;
        }

        mutex_lock(&dev->mode_config.mutex);
        edid = drm_get_edid(connector, &intel_dp->aux.ddc);
        if (edid) {
                if (drm_add_edid_modes(connector, edid)) {
                        drm_connector_update_edid_property(connector, edid);
                        intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
                } else {
                        /* EDID present but contained no usable modes. */
                        kfree(edid);
                        edid = ERR_PTR(-EINVAL);
                }
        } else {
                edid = ERR_PTR(-ENOENT);
        }
        /* Cache the (possibly error-encoded) EDID pointer on the connector. */
        intel_connector->edid = edid;

        fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
        if (fixed_mode)
                downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);

        /* fallback to VBT if available for eDP */
        if (!fixed_mode)
                fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
        mutex_unlock(&dev->mode_config.mutex);

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                intel_dp->edp_notifier.notifier_call = edp_notify_handler;
                register_reboot_notifier(&intel_dp->edp_notifier);

                /*
                 * Figure out the current pipe for the initial backlight setup.
                 * If the current pipe isn't valid, try the PPS pipe, and if that
                 * fails just assume pipe A.
                 */
                pipe = vlv_active_pipe(intel_dp);

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = intel_dp->pps_pipe;

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = PIPE_A;

                drm_dbg_kms(&dev_priv->drm,
                            "using pipe %c for initial backlight setup\n",
                            pipe_name(pipe));
        }

        intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
        intel_connector->panel.backlight.power = intel_edp_backlight_power;
        intel_panel_setup_backlight(connector, pipe);

        if (fixed_mode) {
                drm_connector_set_panel_orientation_with_quirk(connector,
                                dev_priv->vbt.orientation,
                                fixed_mode->hdisplay, fixed_mode->vdisplay);
        }

        return true;

out_vdd_off:
        cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
        /*
         * vdd might still be enabled due to the delayed vdd off.
         * Make sure vdd is actually turned off here.
         */
        with_pps_lock(intel_dp, wakeref)
                edp_panel_vdd_off_sync(intel_dp);

        return false;
}
7518
7519 static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
7520 {
7521         struct intel_connector *intel_connector;
7522         struct drm_connector *connector;
7523
7524         intel_connector = container_of(work, typeof(*intel_connector),
7525                                        modeset_retry_work);
7526         connector = &intel_connector->base;
7527         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
7528                       connector->name);
7529
7530         /* Grab the locks before changing connector property*/
7531         mutex_lock(&connector->dev->mode_config.mutex);
7532         /* Set connector link status to BAD and send a Uevent to notify
7533          * userspace to do a modeset.
7534          */
7535         drm_connector_set_link_status_property(connector,
7536                                                DRM_MODE_LINK_STATUS_BAD);
7537         mutex_unlock(&connector->dev->mode_config.mutex);
7538         /* Send Hotplug uevent so userspace can reprobe */
7539         drm_kms_helper_hotplug_event(connector->dev);
7540 }
7541
/*
 * Create and register the DP/eDP connector for @dig_port: connector type
 * detection, drm connector init, AUX/MST/eDP setup, properties and HDCP.
 * Returns false on any failure (the connector is cleaned up).
 */
bool
intel_dp_init_connector(struct intel_digital_port *dig_port,
                        struct intel_connector *intel_connector)
{
        struct drm_connector *connector = &intel_connector->base;
        struct intel_dp *intel_dp = &dig_port->dp;
        struct intel_encoder *intel_encoder = &dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_encoder->port;
        enum phy phy = intel_port_to_phy(dev_priv, port);
        int type;

        /* Initialize the work for modeset in case of link train failure */
        INIT_WORK(&intel_connector->modeset_retry_work,
                  intel_dp_modeset_retry_work_fn);

        if (drm_WARN(dev, dig_port->max_lanes < 1,
                     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
                     dig_port->max_lanes, intel_encoder->base.base.id,
                     intel_encoder->base.name))
                return false;

        intel_dp_set_source_rates(intel_dp);

        intel_dp->reset_link_params = true;
        intel_dp->pps_pipe = INVALID_PIPE;
        intel_dp->active_pipe = INVALID_PIPE;

        /* Preserve the current hw state. */
        intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
        intel_dp->attached_connector = intel_connector;

        if (intel_dp_is_port_edp(dev_priv, port)) {
                /*
                 * Currently we don't support eDP on TypeC ports, although in
                 * theory it could work on TypeC legacy ports.
                 */
                drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
                type = DRM_MODE_CONNECTOR_eDP;
        } else {
                type = DRM_MODE_CONNECTOR_DisplayPort;
        }

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                intel_dp->active_pipe = vlv_active_pipe(intel_dp);

        /*
         * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
         * for DP the encoder type can be set by the caller to
         * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
         */
        if (type == DRM_MODE_CONNECTOR_eDP)
                intel_encoder->type = INTEL_OUTPUT_EDP;

        /* eDP only on port B and/or C on vlv/chv */
        if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
                              IS_CHERRYVIEW(dev_priv)) &&
                        intel_dp_is_edp(intel_dp) &&
                        port != PORT_B && port != PORT_C))
                return false;

        drm_dbg_kms(&dev_priv->drm,
                    "Adding %s connector on [ENCODER:%d:%s]\n",
                    type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
                    intel_encoder->base.base.id, intel_encoder->base.name);

        drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
        drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

        if (!HAS_GMCH(dev_priv))
                connector->interlace_allowed = true;
        connector->doublescan_allowed = 0;

        if (INTEL_GEN(dev_priv) >= 11)
                connector->ycbcr_420_allowed = true;

        intel_connector->polled = DRM_CONNECTOR_POLL_HPD;

        intel_dp_aux_init(intel_dp);

        intel_connector_attach_encoder(intel_connector, intel_encoder);

        if (HAS_DDI(dev_priv))
                intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
        else
                intel_connector->get_hw_state = intel_connector_get_hw_state;

        /* init MST on ports that can support it */
        intel_dp_mst_encoder_init(dig_port,
                                  intel_connector->base.base.id);

        /* eDP init failure means the panel is a ghost; undo AUX/MST setup. */
        if (!intel_edp_init_connector(intel_dp, intel_connector)) {
                intel_dp_aux_fini(intel_dp);
                intel_dp_mst_encoder_cleanup(dig_port);
                goto fail;
        }

        intel_dp_add_properties(intel_dp, connector);

        if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
                int ret = intel_dp_init_hdcp(dig_port, intel_connector);
                if (ret)
                        drm_dbg_kms(&dev_priv->drm,
                                    "HDCP init failed, skipping.\n");
        }

        /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
         * 0xd.  Failure to do so will result in spurious interrupts being
         * generated on the port when a cable is not attached.
         */
        if (IS_G45(dev_priv)) {
                u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
                intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
                               (temp & ~0xf) | 0xd);
        }

        return true;

fail:
        drm_connector_cleanup(connector);

        return false;
}
7666
/*
 * intel_dp_init - allocate and register a (non-DDI) DP digital port
 * @dev_priv: i915 device private
 * @output_reg: the DP port control register (e.g. DP_B..DP_D)
 * @port: the port this encoder sits on
 *
 * Allocates the digital port and connector, registers the DRM encoder,
 * and wires up the platform-specific enable/disable, link-training and
 * signal-level vfuncs for pre-DDI platforms (G4x/ILK-IVB/VLV/CHV).
 *
 * Returns true on success; on failure all partially-initialized state
 * is torn down via the goto-unwind ladder and false is returned.
 */
bool intel_dp_init(struct drm_i915_private *dev_priv,
		   i915_reg_t output_reg,
		   enum port port)
{
	struct intel_digital_port *dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
	if (!dig_port)
		return false;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &dig_port->base;
	encoder = &intel_encoder->base;

	/* Must be ready before any HDCP path can take it. */
	mutex_init(&dig_port->hdcp_mutex);

	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
			     &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
			     "DP %c", port_name(port)))
		goto err_encoder_init;

	/* Common (platform-independent) encoder hooks. */
	intel_encoder->hotplug = intel_dp_hotplug;
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->update_pipe = intel_panel_update_backlight;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	/*
	 * Platform-specific modeset sequence hooks. CHV must be checked
	 * before VLV here purely as a matter of ladder ordering; the
	 * fallback covers G4x/ILK-style ports.
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = g4x_disable_dp;
		intel_encoder->post_disable = g4x_post_disable_dp;
	}

	/* CPT-style link training applies to IVB eDP (port A) and PCH ports. */
	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A))
		dig_port->dp.set_link_train = cpt_set_link_train;
	else
		dig_port->dp.set_link_train = g4x_set_link_train;

	/* Vswing/pre-emphasis programming differs per platform and port. */
	if (IS_CHERRYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = chv_set_signal_levels;
	else if (IS_VALLEYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = vlv_set_signal_levels;
	else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
	else if (IS_GEN(dev_priv, 6) && port == PORT_A)
		dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
	else
		dig_port->dp.set_signal_levels = g4x_set_signal_levels;

	/* Maximum advertised vswing/pre-emphasis levels for link training. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
	    (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
		dig_port->dp.preemph_max = intel_dp_pre_empemph_max_3;
		dig_port->dp.voltage_max = intel_dp_voltage_max_3;
	} else {
		dig_port->dp.preemph_max = intel_dp_pre_empemph_max_2;
		dig_port->dp.voltage_max = intel_dp_voltage_max_2;
	}

	dig_port->dp.output_reg = output_reg;
	dig_port->max_lanes = 4;
	dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port);
	dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port);

	intel_encoder->type = INTEL_OUTPUT_DP;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
	/* On CHV port D is wired to pipe C only; B/C can use pipes A/B. */
	if (IS_CHERRYVIEW(dev_priv)) {
		if (port == PORT_D)
			intel_encoder->pipe_mask = BIT(PIPE_C);
		else
			intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
	} else {
		intel_encoder->pipe_mask = ~0;
	}
	intel_encoder->cloneable = 0;
	intel_encoder->port = port;
	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);

	dig_port->hpd_pulse = intel_dp_hpd_pulse;

	/* Live-status detection hook depends on the chipset/PCH generation. */
	if (HAS_GMCH(dev_priv)) {
		if (IS_GM45(dev_priv))
			dig_port->connected = gm45_digital_port_connected;
		else
			dig_port->connected = g4x_digital_port_connected;
	} else {
		if (port == PORT_A)
			dig_port->connected = ilk_digital_port_connected;
		else
			dig_port->connected = ibx_digital_port_connected;
	}

	/* Port A (eDP) has no infoframe hardware. */
	if (port != PORT_A)
		intel_infoframe_init(dig_port);

	dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
	if (!intel_dp_init_connector(dig_port, intel_connector))
		goto err_init_connector;

	return true;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(dig_port);
	return false;
}
7796
7797 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
7798 {
7799         struct intel_encoder *encoder;
7800
7801         for_each_intel_encoder(&dev_priv->drm, encoder) {
7802                 struct intel_dp *intel_dp;
7803
7804                 if (encoder->type != INTEL_OUTPUT_DDI)
7805                         continue;
7806
7807                 intel_dp = enc_to_intel_dp(encoder);
7808
7809                 if (!intel_dp->can_mst)
7810                         continue;
7811
7812                 if (intel_dp->is_mst)
7813                         drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
7814         }
7815 }
7816
7817 void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
7818 {
7819         struct intel_encoder *encoder;
7820
7821         for_each_intel_encoder(&dev_priv->drm, encoder) {
7822                 struct intel_dp *intel_dp;
7823                 int ret;
7824
7825                 if (encoder->type != INTEL_OUTPUT_DDI)
7826                         continue;
7827
7828                 intel_dp = enc_to_intel_dp(encoder);
7829
7830                 if (!intel_dp->can_mst)
7831                         continue;
7832
7833                 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
7834                                                      true);
7835                 if (ret) {
7836                         intel_dp->is_mst = false;
7837                         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
7838                                                         false);
7839                 }
7840         }
7841 }