Merge drm-next into drm-intel-next-queued (this time for real)
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / intel_dp.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Keith Packard <keithp@keithp.com>
25  *
26  */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/types.h>
32 #include <linux/notifier.h>
33 #include <linux/reboot.h>
34 #include <asm/byteorder.h>
35 #include <drm/drmP.h>
36 #include <drm/drm_atomic_helper.h>
37 #include <drm/drm_crtc.h>
38 #include <drm/drm_crtc_helper.h>
39 #include <drm/drm_dp_helper.h>
40 #include <drm/drm_edid.h>
41 #include <drm/drm_hdcp.h>
42 #include "intel_drv.h"
43 #include <drm/i915_drm.h>
44 #include "i915_drv.h"
45
46 #define DP_LINK_CHECK_TIMEOUT   (10 * 1000)
47 #define DP_DPRX_ESI_LEN 14
48
49 /* Compliance test status bits  */
50 #define INTEL_DP_RESOLUTION_SHIFT_MASK  0
51 #define INTEL_DP_RESOLUTION_PREFERRED   (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
52 #define INTEL_DP_RESOLUTION_STANDARD    (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
53 #define INTEL_DP_RESOLUTION_FAILSAFE    (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
54
/* Pairing of a DP link clock with the PLL divider settings that produce it. */
struct dp_link_dpll {
        int clock;              /* link clock in kHz (e.g. 162000, 270000) */
        struct dpll dpll;       /* divider values programmed into the DPLL */
};
59
/* DPLL dividers for the 162000 and 270000 kHz DP link clocks on gen4. */
static const struct dp_link_dpll gen4_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};
66
/* DPLL dividers for the 162000 and 270000 kHz DP link clocks on PCH ports. */
static const struct dp_link_dpll pch_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
        { 270000,
                { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};
73
/* DPLL dividers for the 162000 and 270000 kHz DP link clocks on VLV. */
static const struct dp_link_dpll vlv_dpll[] = {
        { 162000,
                { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
        { 270000,
                { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
80
/*
 * CHV supports eDP 1.4 that have  more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
        /*
         * CHV requires to program fractional division for m2.
         * m2 is stored in fixed point format using formula below
         * (m2_int << 22) | m2_fraction
         */
        { 162000,       /* m2_int = 32, m2_fraction = 1677722 */
                { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
        { 270000,       /* m2_int = 27, m2_fraction = 0 */
                { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
        { 540000,       /* m2_int = 27, m2_fraction = 0, only p2 differs from 270000 */
                { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
98
99 /**
100  * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
101  * @intel_dp: DP struct
102  *
103  * If a CPU or PCH DP output is attached to an eDP panel, this function
104  * will return true, and false otherwise.
105  */
106 bool intel_dp_is_edp(struct intel_dp *intel_dp)
107 {
108         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
109
110         return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
111 }
112
113 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
114 {
115         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
116
117         return intel_dig_port->base.base.dev;
118 }
119
120 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
121 {
122         return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
123 }
124
125 static void intel_dp_link_down(struct intel_encoder *encoder,
126                                const struct intel_crtc_state *old_crtc_state);
127 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
128 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
129 static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
130                                            const struct intel_crtc_state *crtc_state);
131 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
132                                       enum pipe pipe);
133 static void intel_dp_unset_edid(struct intel_dp *intel_dp);
134
135 /* update sink rates from dpcd */
136 static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
137 {
138         static const int dp_rates[] = {
139                 162000, 270000, 540000, 810000
140         };
141         int i, max_rate;
142
143         max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
144
145         for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
146                 if (dp_rates[i] > max_rate)
147                         break;
148                 intel_dp->sink_rates[i] = dp_rates[i];
149         }
150
151         intel_dp->num_sink_rates = i;
152 }
153
/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
        int i;

        /*
         * The rates are sorted ascending, so scan from the top and stop
         * at the first (i.e. highest) entry not exceeding max_rate.
         */
        for (i = len - 1; i >= 0; i--) {
                if (rates[i] <= max_rate)
                        return i + 1;
        }

        return 0;
}
167
168 /* Get length of common rates array potentially limited by max_rate. */
169 static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
170                                           int max_rate)
171 {
172         return intel_dp_rate_limit_len(intel_dp->common_rates,
173                                        intel_dp->num_common_rates, max_rate);
174 }
175
176 /* Theoretical max between source and sink */
177 static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
178 {
179         return intel_dp->common_rates[intel_dp->num_common_rates - 1];
180 }
181
182 /* Theoretical max between source and sink */
183 static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
184 {
185         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
186         int source_max = intel_dig_port->max_lanes;
187         int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
188
189         return min(source_max, sink_max);
190 }
191
/*
 * Max lane count currently allowed on this link; may have been reduced
 * by link-training fallback (see intel_dp_get_link_train_fallback_values()).
 */
int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
        return intel_dp->max_link_lane_count;
}
196
/*
 * Bandwidth (in kB-ish units matching intel_dp_max_data_rate()) needed
 * to drive @pixel_clock (kHz) at @bpp bits per pixel.
 */
int
intel_dp_link_required(int pixel_clock, int bpp)
{
        /* pixel_clock is in kHz; divide the bit rate by 8, rounding up. */
        return (pixel_clock * bpp + 7) / 8;
}
203
int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
        /*
         * max_link_clock is the link symbol clock (LS_Clk) in kHz, not the
         * link rate usually quoted in Gbps. 8 bits of data are transmitted
         * per LS_Clk per lane, so the PHY-layer channel-encoding overhead
         * needs no accounting here.
         */
        int rate = max_link_clock;

        rate *= max_lanes;

        return rate;
}
215
216 static int
217 intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
218 {
219         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
220         struct intel_encoder *encoder = &intel_dig_port->base;
221         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
222         int max_dotclk = dev_priv->max_dotclk_freq;
223         int ds_max_dotclk;
224
225         int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
226
227         if (type != DP_DS_PORT_TYPE_VGA)
228                 return max_dotclk;
229
230         ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
231                                                     intel_dp->downstream_ports);
232
233         if (ds_max_dotclk != 0)
234                 max_dotclk = min(max_dotclk, ds_max_dotclk);
235
236         return max_dotclk;
237 }
238
239 static int cnl_max_source_rate(struct intel_dp *intel_dp)
240 {
241         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
242         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
243         enum port port = dig_port->base.port;
244
245         u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
246
247         /* Low voltage SKUs are limited to max of 5.4G */
248         if (voltage == VOLTAGE_INFO_0_85V)
249                 return 540000;
250
251         /* For this SKU 8.1G is supported in all ports */
252         if (IS_CNL_WITH_PORT_F(dev_priv))
253                 return 810000;
254
255         /* For other SKUs, max rate on ports A and D is 5.4G */
256         if (port == PORT_A || port == PORT_D)
257                 return 540000;
258
259         return 810000;
260 }
261
262 static void
263 intel_dp_set_source_rates(struct intel_dp *intel_dp)
264 {
265         /* The values must be in increasing order */
266         static const int cnl_rates[] = {
267                 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
268         };
269         static const int bxt_rates[] = {
270                 162000, 216000, 243000, 270000, 324000, 432000, 540000
271         };
272         static const int skl_rates[] = {
273                 162000, 216000, 270000, 324000, 432000, 540000
274         };
275         static const int hsw_rates[] = {
276                 162000, 270000, 540000
277         };
278         static const int g4x_rates[] = {
279                 162000, 270000
280         };
281         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
282         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
283         const struct ddi_vbt_port_info *info =
284                 &dev_priv->vbt.ddi_port_info[dig_port->base.port];
285         const int *source_rates;
286         int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;
287
288         /* This should only be done once */
289         WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);
290
291         if (IS_CANNONLAKE(dev_priv)) {
292                 source_rates = cnl_rates;
293                 size = ARRAY_SIZE(cnl_rates);
294                 max_rate = cnl_max_source_rate(intel_dp);
295         } else if (IS_GEN9_LP(dev_priv)) {
296                 source_rates = bxt_rates;
297                 size = ARRAY_SIZE(bxt_rates);
298         } else if (IS_GEN9_BC(dev_priv)) {
299                 source_rates = skl_rates;
300                 size = ARRAY_SIZE(skl_rates);
301         } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
302                    IS_BROADWELL(dev_priv)) {
303                 source_rates = hsw_rates;
304                 size = ARRAY_SIZE(hsw_rates);
305         } else {
306                 source_rates = g4x_rates;
307                 size = ARRAY_SIZE(g4x_rates);
308         }
309
310         if (max_rate && vbt_max_rate)
311                 max_rate = min(max_rate, vbt_max_rate);
312         else if (vbt_max_rate)
313                 max_rate = vbt_max_rate;
314
315         if (max_rate)
316                 size = intel_dp_rate_limit_len(source_rates, size, max_rate);
317
318         intel_dp->source_rates = source_rates;
319         intel_dp->num_source_rates = size;
320 }
321
322 static int intersect_rates(const int *source_rates, int source_len,
323                            const int *sink_rates, int sink_len,
324                            int *common_rates)
325 {
326         int i = 0, j = 0, k = 0;
327
328         while (i < source_len && j < sink_len) {
329                 if (source_rates[i] == sink_rates[j]) {
330                         if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
331                                 return k;
332                         common_rates[k] = source_rates[i];
333                         ++k;
334                         ++i;
335                         ++j;
336                 } else if (source_rates[i] < sink_rates[j]) {
337                         ++i;
338                 } else {
339                         ++j;
340                 }
341         }
342         return k;
343 }
344
/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
        int idx = 0;

        /* Linear scan is fine; the rate tables are tiny. */
        while (idx < len) {
                if (rates[idx] == rate)
                        return idx;
                idx++;
        }

        return -1;
}
356
357 static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
358 {
359         WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);
360
361         intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
362                                                      intel_dp->num_source_rates,
363                                                      intel_dp->sink_rates,
364                                                      intel_dp->num_sink_rates,
365                                                      intel_dp->common_rates);
366
367         /* Paranoia, there should always be something in common. */
368         if (WARN_ON(intel_dp->num_common_rates == 0)) {
369                 intel_dp->common_rates[0] = 162000;
370                 intel_dp->num_common_rates = 1;
371         }
372 }
373
374 static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
375                                        uint8_t lane_count)
376 {
377         /*
378          * FIXME: we need to synchronize the current link parameters with
379          * hardware readout. Currently fast link training doesn't work on
380          * boot-up.
381          */
382         if (link_rate == 0 ||
383             link_rate > intel_dp->max_link_rate)
384                 return false;
385
386         if (lane_count == 0 ||
387             lane_count > intel_dp_max_lane_count(intel_dp))
388                 return false;
389
390         return true;
391 }
392
393 int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
394                                             int link_rate, uint8_t lane_count)
395 {
396         int index;
397
398         index = intel_dp_rate_index(intel_dp->common_rates,
399                                     intel_dp->num_common_rates,
400                                     link_rate);
401         if (index > 0) {
402                 intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
403                 intel_dp->max_link_lane_count = lane_count;
404         } else if (lane_count > 1) {
405                 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
406                 intel_dp->max_link_lane_count = lane_count >> 1;
407         } else {
408                 DRM_ERROR("Link Training Unsuccessful\n");
409                 return -1;
410         }
411
412         return 0;
413 }
414
415 static enum drm_mode_status
416 intel_dp_mode_valid(struct drm_connector *connector,
417                     struct drm_display_mode *mode)
418 {
419         struct intel_dp *intel_dp = intel_attached_dp(connector);
420         struct intel_connector *intel_connector = to_intel_connector(connector);
421         struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
422         int target_clock = mode->clock;
423         int max_rate, mode_rate, max_lanes, max_link_clock;
424         int max_dotclk;
425
426         max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
427
428         if (intel_dp_is_edp(intel_dp) && fixed_mode) {
429                 if (mode->hdisplay > fixed_mode->hdisplay)
430                         return MODE_PANEL;
431
432                 if (mode->vdisplay > fixed_mode->vdisplay)
433                         return MODE_PANEL;
434
435                 target_clock = fixed_mode->clock;
436         }
437
438         max_link_clock = intel_dp_max_link_rate(intel_dp);
439         max_lanes = intel_dp_max_lane_count(intel_dp);
440
441         max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
442         mode_rate = intel_dp_link_required(target_clock, 18);
443
444         if (mode_rate > max_rate || target_clock > max_dotclk)
445                 return MODE_CLOCK_HIGH;
446
447         if (mode->clock < 10000)
448                 return MODE_CLOCK_LOW;
449
450         if (mode->flags & DRM_MODE_FLAG_DBLCLK)
451                 return MODE_H_ILLEGAL;
452
453         return MODE_OK;
454 }
455
/* Pack up to the first 4 bytes of @src big-endian into one 32-bit word. */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
        uint32_t v = 0;
        int i;

        if (src_bytes > 4)
                src_bytes = 4;

        /* src[0] lands in the most significant byte. */
        for (i = 0; i < src_bytes; i++)
                v |= (uint32_t)src[i] << (24 - i * 8);

        return v;
}
467
/* Unpack a big-endian 32-bit AUX word into up to 4 bytes of @dst. */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
        int i;

        if (dst_bytes > 4)
                dst_bytes = 4;

        for (i = 0; i < dst_bytes; i++)
                dst[i] = (uint8_t)(src >> (24 - i * 8));
}
476
477 static void
478 intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
479 static void
480 intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
481                                               bool force_disable_vdd);
482 static void
483 intel_dp_pps_init(struct intel_dp *intel_dp);
484
/*
 * Acquire pps_mutex, taking the AUX power domain reference first: the
 * domain get/put must always happen outside pps_mutex (see the locking
 * comment in intel_power_sequencer_reset()).
 */
static void pps_lock(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

        /*
         * See intel_power_sequencer_reset() why we need
         * a power domain reference here.
         */
        intel_display_power_get(dev_priv, intel_dp->aux_power_domain);

        mutex_lock(&dev_priv->pps_mutex);
}
497
/*
 * Counterpart of pps_lock(): release pps_mutex first, then drop the AUX
 * power domain reference (must be dropped while not holding the mutex).
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

        mutex_unlock(&dev_priv->pps_mutex);

        intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
}
506
507 static void
508 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
509 {
510         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
511         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
512         enum pipe pipe = intel_dp->pps_pipe;
513         bool pll_enabled, release_cl_override = false;
514         enum dpio_phy phy = DPIO_PHY(pipe);
515         enum dpio_channel ch = vlv_pipe_to_channel(pipe);
516         uint32_t DP;
517
518         if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
519                  "skipping pipe %c power seqeuncer kick due to port %c being active\n",
520                  pipe_name(pipe), port_name(intel_dig_port->base.port)))
521                 return;
522
523         DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
524                       pipe_name(pipe), port_name(intel_dig_port->base.port));
525
526         /* Preserve the BIOS-computed detected bit. This is
527          * supposed to be read-only.
528          */
529         DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
530         DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
531         DP |= DP_PORT_WIDTH(1);
532         DP |= DP_LINK_TRAIN_PAT_1;
533
534         if (IS_CHERRYVIEW(dev_priv))
535                 DP |= DP_PIPE_SELECT_CHV(pipe);
536         else if (pipe == PIPE_B)
537                 DP |= DP_PIPEB_SELECT;
538
539         pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
540
541         /*
542          * The DPLL for the pipe must be enabled for this to work.
543          * So enable temporarily it if it's not already enabled.
544          */
545         if (!pll_enabled) {
546                 release_cl_override = IS_CHERRYVIEW(dev_priv) &&
547                         !chv_phy_powergate_ch(dev_priv, phy, ch, true);
548
549                 if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
550                                      &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
551                         DRM_ERROR("Failed to force on pll for pipe %c!\n",
552                                   pipe_name(pipe));
553                         return;
554                 }
555         }
556
557         /*
558          * Similar magic as in intel_dp_enable_port().
559          * We _must_ do this port enable + disable trick
560          * to make this power seqeuencer lock onto the port.
561          * Otherwise even VDD force bit won't work.
562          */
563         I915_WRITE(intel_dp->output_reg, DP);
564         POSTING_READ(intel_dp->output_reg);
565
566         I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
567         POSTING_READ(intel_dp->output_reg);
568
569         I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
570         POSTING_READ(intel_dp->output_reg);
571
572         if (!pll_enabled) {
573                 vlv_force_pll_off(dev_priv, pipe);
574
575                 if (release_cl_override)
576                         chv_phy_powergate_ch(dev_priv, phy, ch, false);
577         }
578 }
579
580 static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
581 {
582         struct intel_encoder *encoder;
583         unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
584
585         /*
586          * We don't have power sequencer currently.
587          * Pick one that's not used by other ports.
588          */
589         for_each_intel_encoder(&dev_priv->drm, encoder) {
590                 struct intel_dp *intel_dp;
591
592                 if (encoder->type != INTEL_OUTPUT_DP &&
593                     encoder->type != INTEL_OUTPUT_EDP)
594                         continue;
595
596                 intel_dp = enc_to_intel_dp(&encoder->base);
597
598                 if (encoder->type == INTEL_OUTPUT_EDP) {
599                         WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
600                                 intel_dp->active_pipe != intel_dp->pps_pipe);
601
602                         if (intel_dp->pps_pipe != INVALID_PIPE)
603                                 pipes &= ~(1 << intel_dp->pps_pipe);
604                 } else {
605                         WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);
606
607                         if (intel_dp->active_pipe != INVALID_PIPE)
608                                 pipes &= ~(1 << intel_dp->active_pipe);
609                 }
610         }
611
612         if (pipes == 0)
613                 return INVALID_PIPE;
614
615         return ffs(pipes) - 1;
616 }
617
/*
 * Return the pipe whose power sequencer is assigned to this eDP port,
 * picking - and if necessary stealing - a free one on first use, then
 * initializing and kicking it. Caller must hold pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum pipe pipe;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* We should never land here with regular DP ports */
        WARN_ON(!intel_dp_is_edp(intel_dp));

        /* If the port is active, it must be driven by the PPS pipe we own. */
        WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
                intel_dp->active_pipe != intel_dp->pps_pipe);

        if (intel_dp->pps_pipe != INVALID_PIPE)
                return intel_dp->pps_pipe;

        pipe = vlv_find_free_pps(dev_priv);

        /*
         * Didn't find one. This should not happen since there
         * are two power sequencers and up to two eDP ports.
         */
        if (WARN_ON(pipe == INVALID_PIPE))
                pipe = PIPE_A;

        vlv_steal_power_sequencer(dev_priv, pipe);
        intel_dp->pps_pipe = pipe;

        DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe),
                      port_name(intel_dig_port->base.port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(intel_dp);
        intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

        /*
         * Even vdd force doesn't work until we've made
         * the power sequencer lock in on the port.
         */
        vlv_power_sequencer_kick(intel_dp);

        return intel_dp->pps_pipe;
}
664
665 static int
666 bxt_power_sequencer_idx(struct intel_dp *intel_dp)
667 {
668         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
669         int backlight_controller = dev_priv->vbt.backlight.controller;
670
671         lockdep_assert_held(&dev_priv->pps_mutex);
672
673         /* We should never land here with regular DP ports */
674         WARN_ON(!intel_dp_is_edp(intel_dp));
675
676         if (!intel_dp->pps_reset)
677                 return backlight_controller;
678
679         intel_dp->pps_reset = false;
680
681         /*
682          * Only the HW needs to be reprogrammed, the SW state is fixed and
683          * has been setup during connector init.
684          */
685         intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
686
687         return backlight_controller;
688 }
689
/* Predicate used by vlv_initial_pps_pipe() to filter candidate pipes. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
                               enum pipe pipe);
692
693 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
694                                enum pipe pipe)
695 {
696         return I915_READ(PP_STATUS(pipe)) & PP_ON;
697 }
698
699 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
700                                 enum pipe pipe)
701 {
702         return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
703 }
704
/* Catch-all pipe check: accepts every pipe unconditionally. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
                         enum pipe pipe)
{
        return true;
}
710
711 static enum pipe
712 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
713                      enum port port,
714                      vlv_pipe_check pipe_check)
715 {
716         enum pipe pipe;
717
718         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
719                 u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
720                         PANEL_PORT_SELECT_MASK;
721
722                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
723                         continue;
724
725                 if (!pipe_check(dev_priv, pipe))
726                         continue;
727
728                 return pipe;
729         }
730
731         return INVALID_PIPE;
732 }
733
/*
 * At init time, figure out which pipe's power sequencer the firmware left
 * driving this eDP port. Preference order: a sequencer with the panel on,
 * then one with VDD forced on, then any with a matching port select.
 * Caller must hold pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->base.port;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* try to find a pipe with this port selected */
        /* first pick one where the panel is on */
        intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                  vlv_pipe_has_pp_on);
        /* didn't find one? pick one where vdd is on */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_has_vdd_on);
        /* didn't find one? pick one with just the correct port */
        if (intel_dp->pps_pipe == INVALID_PIPE)
                intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
                                                          vlv_pipe_any);

        /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
        if (intel_dp->pps_pipe == INVALID_PIPE) {
                DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
                              port_name(port));
                return;
        }

        DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
                      port_name(port), pipe_name(intel_dp->pps_pipe));

        intel_dp_init_panel_power_sequencer(intel_dp);
        intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}
769
/*
 * Invalidate cached panel power sequencer state for all eDP ports so it is
 * re-detected or reprogrammed on next use: GEN9_LP flags its PPS registers
 * for reprogramming, VLV/CHV forget their pipe assignment entirely.
 */
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;

        if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
                    !IS_GEN9_LP(dev_priv)))
                return;

        /*
         * We can't grab pps_mutex here due to deadlock with power_domain
         * mutex when power_domain functions are called while holding pps_mutex.
         * That also means that in order to use pps_pipe the code needs to
         * hold both a power domain reference and pps_mutex, and the power domain
         * reference get/put must be done while _not_ holding pps_mutex.
         * pps_{lock,unlock}() do these steps in the correct order, so one
         * should use them always.
         */

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                struct intel_dp *intel_dp;

                if (encoder->type != INTEL_OUTPUT_DP &&
                    encoder->type != INTEL_OUTPUT_EDP &&
                    encoder->type != INTEL_OUTPUT_DDI)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);

                /* Skip pure DVI/HDMI DDI encoders */
                if (!i915_mmio_reg_valid(intel_dp->output_reg))
                        continue;

                WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

                /* Only eDP ports carry power sequencer state. */
                if (encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                if (IS_GEN9_LP(dev_priv))
                        intel_dp->pps_reset = true;
                else
                        intel_dp->pps_pipe = INVALID_PIPE;
        }
}
813
/*
 * The register set of one panel power sequencer instance. pp_div is left
 * zeroed on platforms where the divisor register does not exist (see
 * intel_pps_get_registers()).
 */
struct pps_registers {
        i915_reg_t pp_ctrl;     /* PP_CONTROL */
        i915_reg_t pp_stat;     /* PP_STATUS */
        i915_reg_t pp_on;       /* PP_ON_DELAYS */
        i915_reg_t pp_off;      /* PP_OFF_DELAYS */
        i915_reg_t pp_div;      /* PP_DIVISOR, where present */
};
821
/*
 * Fill @regs with the PPS register set for this port: indexed by the BXT
 * sequencer index on GEN9_LP, by pipe on VLV/CHV, and fixed at 0 elsewhere.
 */
static void intel_pps_get_registers(struct intel_dp *intel_dp,
                                    struct pps_registers *regs)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        int pps_idx = 0;

        memset(regs, 0, sizeof(*regs));

        if (IS_GEN9_LP(dev_priv))
                pps_idx = bxt_power_sequencer_idx(intel_dp);
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                pps_idx = vlv_power_sequencer_pipe(intel_dp);

        regs->pp_ctrl = PP_CONTROL(pps_idx);
        regs->pp_stat = PP_STATUS(pps_idx);
        regs->pp_on = PP_ON_DELAYS(pps_idx);
        regs->pp_off = PP_OFF_DELAYS(pps_idx);
        /* No divisor register on GEN9_LP or on CNP/ICP PCHs. */
        if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
            !HAS_PCH_ICP(dev_priv))
                regs->pp_div = PP_DIVISOR(pps_idx);
}
843
844 static i915_reg_t
845 _pp_ctrl_reg(struct intel_dp *intel_dp)
846 {
847         struct pps_registers regs;
848
849         intel_pps_get_registers(intel_dp, &regs);
850
851         return regs.pp_ctrl;
852 }
853
854 static i915_reg_t
855 _pp_stat_reg(struct intel_dp *intel_dp)
856 {
857         struct pps_registers regs;
858
859         intel_pps_get_registers(intel_dp, &regs);
860
861         return regs.pp_stat;
862 }
863
/*
 * Reboot notifier: force panel power off on VLV/CHV so the T12 panel
 * power cycle timing is guaranteed across the restart. Only applicable
 * when the panel PM state is not otherwise tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

	/* Only act for eDP panels, and only on an actual restart. */
	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		i915_reg_t pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = PP_CONTROL(pipe);
		pp_div_reg  = PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		/* Keep the reference divider, replace only the cycle delay. */
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		/* Hold up the reboot until the power cycle delay has passed. */
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
898
/*
 * edp_have_panel_power - does the power sequencer report panel power on?
 *
 * Caller must hold pps_mutex. On VLV/CHV a port that has no power
 * sequencer assigned (pps_pipe == INVALID_PIPE) cannot have panel power.
 */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
911
/*
 * edp_have_panel_vdd - is VDD currently being forced on for this panel?
 *
 * Caller must hold pps_mutex. As with edp_have_panel_power(), a VLV/CHV
 * port without an assigned power sequencer cannot have VDD forced on.
 */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
924
/*
 * Sanity check before an AUX transaction: an eDP panel must have either
 * panel power or forced VDD for the access to have a chance of working.
 * Warn (once per offending call) and dump the PPS status/control
 * registers if neither is the case. No-op for non-eDP ports.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

	if (!intel_dp_is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}
940
/*
 * Wait for the AUX channel to go idle and return the final value of the
 * AUX control register. The C macro re-reads the register and tests
 * SEND_BUSY on every evaluation. With AUX interrupts available we sleep
 * on gmbus_wait_queue, otherwise we poll; both paths give up after
 * ~10ms, which is logged since the hardware should always signal.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
962
963 static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
964 {
965         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
966
967         if (index)
968                 return 0;
969
970         /*
971          * The clock divider is based off the hrawclk, and would like to run at
972          * 2MHz.  So, take the hrawclk value and divide by 2000 and use that
973          */
974         return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
975 }
976
977 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
978 {
979         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
980
981         if (index)
982                 return 0;
983
984         /*
985          * The clock divider is based off the cdclk or PCH rawclk, and would
986          * like to run at 2MHz.  So, take the cdclk or PCH rawclk value and
987          * divide by 2000 and use that
988          */
989         if (intel_dp->aux_ch == AUX_CH_A)
990                 return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
991         else
992                 return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
993 }
994
995 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
996 {
997         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
998
999         if (intel_dp->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
1000                 /* Workaround for non-ULT HSW */
1001                 switch (index) {
1002                 case 0: return 63;
1003                 case 1: return 72;
1004                 default: return 0;
1005                 }
1006         }
1007
1008         return ilk_get_aux_clock_divider(intel_dp, index);
1009 }
1010
1011 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
1012 {
1013         /*
1014          * SKL doesn't need us to program the AUX clock divider (Hardware will
1015          * derive the clock from CDCLK automatically). We still implement the
1016          * get_aux_clock_divider vfunc to plug-in into the existing code.
1017          */
1018         return index ? 0 : 1;
1019 }
1020
1021 static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
1022                                      bool has_aux_irq,
1023                                      int send_bytes,
1024                                      uint32_t aux_clock_divider)
1025 {
1026         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1027         struct drm_i915_private *dev_priv =
1028                         to_i915(intel_dig_port->base.base.dev);
1029         uint32_t precharge, timeout;
1030
1031         if (IS_GEN6(dev_priv))
1032                 precharge = 3;
1033         else
1034                 precharge = 5;
1035
1036         if (IS_BROADWELL(dev_priv))
1037                 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
1038         else
1039                 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
1040
1041         return DP_AUX_CH_CTL_SEND_BUSY |
1042                DP_AUX_CH_CTL_DONE |
1043                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
1044                DP_AUX_CH_CTL_TIME_OUT_ERROR |
1045                timeout |
1046                DP_AUX_CH_CTL_RECEIVE_ERROR |
1047                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1048                (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
1049                (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
1050 }
1051
1052 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
1053                                       bool has_aux_irq,
1054                                       int send_bytes,
1055                                       uint32_t unused)
1056 {
1057         return DP_AUX_CH_CTL_SEND_BUSY |
1058                DP_AUX_CH_CTL_DONE |
1059                (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
1060                DP_AUX_CH_CTL_TIME_OUT_ERROR |
1061                DP_AUX_CH_CTL_TIME_OUT_MAX |
1062                DP_AUX_CH_CTL_RECEIVE_ERROR |
1063                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1064                DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
1065                DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
1066 }
1067
1068 static uint32_t intel_dp_get_aux_send_ctl(struct intel_dp *intel_dp,
1069                                           bool has_aux_irq,
1070                                           int send_bytes,
1071                                           uint32_t aux_clock_divider,
1072                                           bool aksv_write)
1073 {
1074         uint32_t val = 0;
1075
1076         if (aksv_write) {
1077                 send_bytes += 5;
1078                 val |= DP_AUX_CH_CTL_AUX_AKSV_SELECT;
1079         }
1080
1081         return val | intel_dp->get_aux_send_ctl(intel_dp,
1082                                                 has_aux_irq,
1083                                                 send_bytes,
1084                                                 aux_clock_divider);
1085 }
1086
/*
 * intel_dp_aux_ch - perform one raw AUX channel transaction
 * @intel_dp: port to talk over
 * @send: packed request bytes (header plus payload)
 * @send_bytes: number of bytes in @send, at most 20
 * @recv: buffer for the reply payload
 * @recv_size: size of @recv, at most 20
 * @aksv_write: true for an HDCP AKSV write (hardware supplies the key)
 *
 * Returns the number of reply bytes unpacked into @recv, or a negative
 * errno: -EBUSY if the channel never went idle or the reply size was
 * invalid, -E2BIG for oversized requests, -EIO on receive errors, and
 * -ETIMEDOUT when the sink did not answer (normal when disconnected).
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size, bool aksv_write)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	i915_reg_t ch_ctl, ch_data[5];
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev_priv);
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* The WARN is rate-limited to one per distinct status value. */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Retry the whole transfer once per supported AUX clock divider. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp_get_aux_send_ctl(intel_dp,
							 has_aux_irq,
							 send_bytes,
							 aux_clock_divider,
							 aksv_write);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
1255
1256 #define BARE_ADDRESS_SIZE       3
1257 #define HEADER_SIZE             (BARE_ADDRESS_SIZE + 1)
/*
 * intel_dp_aux_transfer - drm_dp_aux .transfer hook
 * @aux: the drm AUX channel embedded in intel_dp
 * @msg: request to send; the reply code and any read payload are
 *       written back into it
 *
 * Packs the AUX header (request nibble, 20-bit address, length-1) and
 * hands the transaction to intel_dp_aux_ch(). Returns the payload size
 * on success or a negative errno.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* A zero-sized message is an address-only transaction. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		/* A buffer without a size (or vice versa) is a caller bug. */
		WARN_ON(!msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize,
				      false);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		/* One extra byte for the reply command in rxbuf[0]. */
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize,
				      false);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1332
/*
 * intel_aux_ch - determine which AUX channel services this port
 *
 * Without a VBT override the AUX channel simply follows the port
 * (port B -> AUX B, etc.). Otherwise the VBT's alternate_aux_channel
 * value is translated to the matching aux_ch, falling back to AUX A
 * (with a MISSING_CASE warning) for unknown values.
 */
static enum aux_ch intel_aux_ch(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum port port = encoder->port;
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[port];
	enum aux_ch aux_ch;

	if (!info->alternate_aux_channel) {
		aux_ch = (enum aux_ch) port;

		DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
			      aux_ch_name(aux_ch), port_name(port));
		return aux_ch;
	}

	switch (info->alternate_aux_channel) {
	case DP_AUX_A:
		aux_ch = AUX_CH_A;
		break;
	case DP_AUX_B:
		aux_ch = AUX_CH_B;
		break;
	case DP_AUX_C:
		aux_ch = AUX_CH_C;
		break;
	case DP_AUX_D:
		aux_ch = AUX_CH_D;
		break;
	case DP_AUX_F:
		aux_ch = AUX_CH_F;
		break;
	default:
		MISSING_CASE(info->alternate_aux_channel);
		aux_ch = AUX_CH_A;
		break;
	}

	DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
		      aux_ch_name(aux_ch), port_name(port));

	return aux_ch;
}
1377
/*
 * Map the port's AUX channel to the corresponding AUX power domain.
 * Unknown channels warn via MISSING_CASE and fall back to AUX A.
 */
static enum intel_display_power_domain
intel_aux_power_domain(struct intel_dp *intel_dp)
{
	switch (intel_dp->aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	default:
		MISSING_CASE(intel_dp->aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}
1397
/*
 * AUX control register on g4x-class platforms: only channels B-D exist
 * here; anything else is a bug (MISSING_CASE) and falls back to B.
 */
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum aux_ch aux_ch = intel_dp->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}
1413
/*
 * AUX data register @index on g4x-class platforms; same channel
 * coverage and fallback as g4x_aux_ctl_reg().
 */
static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum aux_ch aux_ch = intel_dp->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}
1429
/*
 * AUX control register on PCH-split platforms: channel A uses the
 * non-PCH register, B-D live behind the PCH. Unknown channels warn and
 * fall back to A.
 */
static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum aux_ch aux_ch = intel_dp->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}
1447
/*
 * AUX data register @index on PCH-split platforms; same channel split
 * and fallback as ilk_aux_ctl_reg().
 */
static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum aux_ch aux_ch = intel_dp->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}
1465
/*
 * AUX control register on SKL+: channels A-D and F are handled
 * uniformly (note: no AUX_CH_E case here). Unknown channels warn and
 * fall back to A.
 */
static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum aux_ch aux_ch = intel_dp->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}
1483
/*
 * AUX data register @index on SKL+; same channel coverage and fallback
 * as skl_aux_ctl_reg().
 */
static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum aux_ch aux_ch = intel_dp->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}
1501
/* Free what intel_dp_aux_init() allocated (the AUX channel name). */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}
1507
1508 static void
1509 intel_dp_aux_init(struct intel_dp *intel_dp)
1510 {
1511         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1512         struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
1513
1514         intel_dp->aux_ch = intel_aux_ch(intel_dp);
1515         intel_dp->aux_power_domain = intel_aux_power_domain(intel_dp);
1516
1517         if (INTEL_GEN(dev_priv) >= 9) {
1518                 intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
1519                 intel_dp->aux_ch_data_reg = skl_aux_data_reg;
1520         } else if (HAS_PCH_SPLIT(dev_priv)) {
1521                 intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
1522                 intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
1523         } else {
1524                 intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
1525                 intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
1526         }
1527
1528         if (INTEL_GEN(dev_priv) >= 9)
1529                 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
1530         else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
1531                 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
1532         else if (HAS_PCH_SPLIT(dev_priv))
1533                 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
1534         else
1535                 intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
1536
1537         if (INTEL_GEN(dev_priv) >= 9)
1538                 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
1539         else
1540                 intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
1541
1542         drm_dp_aux_init(&intel_dp->aux);
1543
1544         /* Failure to allocate our preferred name is not critical */
1545         intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
1546                                        port_name(encoder->port));
1547         intel_dp->aux.transfer = intel_dp_aux_transfer;
1548 }
1549
1550 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1551 {
1552         int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1553
1554         return max_rate >= 540000;
1555 }
1556
1557 static void
1558 intel_dp_set_clock(struct intel_encoder *encoder,
1559                    struct intel_crtc_state *pipe_config)
1560 {
1561         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1562         const struct dp_link_dpll *divisor = NULL;
1563         int i, count = 0;
1564
1565         if (IS_G4X(dev_priv)) {
1566                 divisor = gen4_dpll;
1567                 count = ARRAY_SIZE(gen4_dpll);
1568         } else if (HAS_PCH_SPLIT(dev_priv)) {
1569                 divisor = pch_dpll;
1570                 count = ARRAY_SIZE(pch_dpll);
1571         } else if (IS_CHERRYVIEW(dev_priv)) {
1572                 divisor = chv_dpll;
1573                 count = ARRAY_SIZE(chv_dpll);
1574         } else if (IS_VALLEYVIEW(dev_priv)) {
1575                 divisor = vlv_dpll;
1576                 count = ARRAY_SIZE(vlv_dpll);
1577         }
1578
1579         if (divisor && count) {
1580                 for (i = 0; i < count; i++) {
1581                         if (pipe_config->port_clock == divisor[i].clock) {
1582                                 pipe_config->dpll = divisor[i].dpll;
1583                                 pipe_config->clock_set = true;
1584                                 break;
1585                         }
1586                 }
1587         }
1588 }
1589
/*
 * snprintf_int_array - format an int array as "a, b, c" into @str
 * @str: destination buffer; NUL-terminated on return when @len > 0
 * @len: size of @str in bytes
 * @array: values to print
 * @nelem: number of values in @array
 *
 * Output is silently truncated if the buffer is too small.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	/* Nothing we can safely write into a zero-sized buffer. */
	if (len == 0)
		return;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		/*
		 * Stop on a formatting error or truncation. The explicit
		 * cast avoids comparing a signed int against size_t.
		 */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1605
/*
 * Dump the source, sink and common link rate lists to the KMS debug
 * log. Bails out early (skipping the string formatting work) unless
 * DRM_UT_KMS debugging is enabled.
 */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->common_rates, intel_dp->num_common_rates);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
1625
1626 int
1627 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1628 {
1629         int len;
1630
1631         len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
1632         if (WARN_ON(len <= 0))
1633                 return 162000;
1634
1635         return intel_dp->common_rates[len - 1];
1636 }
1637
1638 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1639 {
1640         int i = intel_dp_rate_index(intel_dp->sink_rates,
1641                                     intel_dp->num_sink_rates, rate);
1642
1643         if (WARN_ON(i < 0))
1644                 i = 0;
1645
1646         return i;
1647 }
1648
1649 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1650                            uint8_t *link_bw, uint8_t *rate_select)
1651 {
1652         /* eDP 1.4 rate select method. */
1653         if (intel_dp->use_rate_select) {
1654                 *link_bw = 0;
1655                 *rate_select =
1656                         intel_dp_rate_select(intel_dp, port_clock);
1657         } else {
1658                 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1659                 *rate_select = 0;
1660         }
1661 }
1662
1663 static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
1664                                 struct intel_crtc_state *pipe_config)
1665 {
1666         int bpp, bpc;
1667
1668         bpp = pipe_config->pipe_bpp;
1669         bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
1670
1671         if (bpc > 0)
1672                 bpp = min(bpp, 3*bpc);
1673
1674         /* For DP Compliance we override the computed bpp for the pipe */
1675         if (intel_dp->compliance.test_data.bpc != 0) {
1676                 pipe_config->pipe_bpp = 3*intel_dp->compliance.test_data.bpc;
1677                 pipe_config->dither_force_disable = pipe_config->pipe_bpp == 6*3;
1678                 DRM_DEBUG_KMS("Setting pipe_bpp to %d\n",
1679                               pipe_config->pipe_bpp);
1680         }
1681         return bpp;
1682 }
1683
1684 static bool intel_edp_compare_alt_mode(struct drm_display_mode *m1,
1685                                        struct drm_display_mode *m2)
1686 {
1687         bool bres = false;
1688
1689         if (m1 && m2)
1690                 bres = (m1->hdisplay == m2->hdisplay &&
1691                         m1->hsync_start == m2->hsync_start &&
1692                         m1->hsync_end == m2->hsync_end &&
1693                         m1->htotal == m2->htotal &&
1694                         m1->vdisplay == m2->vdisplay &&
1695                         m1->vsync_start == m2->vsync_start &&
1696                         m1->vsync_end == m2->vsync_end &&
1697                         m1->vtotal == m2->vtotal);
1698         return bres;
1699 }
1700
1701 bool
1702 intel_dp_compute_config(struct intel_encoder *encoder,
1703                         struct intel_crtc_state *pipe_config,
1704                         struct drm_connector_state *conn_state)
1705 {
1706         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1707         struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1708         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1709         enum port port = encoder->port;
1710         struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1711         struct intel_connector *intel_connector = intel_dp->attached_connector;
1712         struct intel_digital_connector_state *intel_conn_state =
1713                 to_intel_digital_connector_state(conn_state);
1714         int lane_count, clock;
1715         int min_lane_count = 1;
1716         int max_lane_count = intel_dp_max_lane_count(intel_dp);
1717         /* Conveniently, the link BW constants become indices with a shift...*/
1718         int min_clock = 0;
1719         int max_clock;
1720         int bpp, mode_rate;
1721         int link_avail, link_clock;
1722         int common_len;
1723         uint8_t link_bw, rate_select;
1724         bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
1725                                            DP_DPCD_QUIRK_LIMITED_M_N);
1726
1727         common_len = intel_dp_common_len_rate_limit(intel_dp,
1728                                                     intel_dp->max_link_rate);
1729
1730         /* No common link rates between source and sink */
1731         WARN_ON(common_len <= 0);
1732
1733         max_clock = common_len - 1;
1734
1735         if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
1736                 pipe_config->has_pch_encoder = true;
1737
1738         pipe_config->has_drrs = false;
1739         if (IS_G4X(dev_priv) || port == PORT_A)
1740                 pipe_config->has_audio = false;
1741         else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
1742                 pipe_config->has_audio = intel_dp->has_audio;
1743         else
1744                 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
1745
1746         if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1747                 struct drm_display_mode *panel_mode =
1748                         intel_connector->panel.alt_fixed_mode;
1749                 struct drm_display_mode *req_mode = &pipe_config->base.mode;
1750
1751                 if (!intel_edp_compare_alt_mode(req_mode, panel_mode))
1752                         panel_mode = intel_connector->panel.fixed_mode;
1753
1754                 drm_mode_debug_printmodeline(panel_mode);
1755
1756                 intel_fixed_panel_mode(panel_mode, adjusted_mode);
1757
1758                 if (INTEL_GEN(dev_priv) >= 9) {
1759                         int ret;
1760                         ret = skl_update_scaler_crtc(pipe_config);
1761                         if (ret)
1762                                 return ret;
1763                 }
1764
1765                 if (HAS_GMCH_DISPLAY(dev_priv))
1766                         intel_gmch_panel_fitting(intel_crtc, pipe_config,
1767                                                  conn_state->scaling_mode);
1768                 else
1769                         intel_pch_panel_fitting(intel_crtc, pipe_config,
1770                                                 conn_state->scaling_mode);
1771         }
1772
1773         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1774             adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
1775                 return false;
1776
1777         if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1778                 return false;
1779
1780         /* Use values requested by Compliance Test Request */
1781         if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
1782                 int index;
1783
1784                 /* Validate the compliance test data since max values
1785                  * might have changed due to link train fallback.
1786                  */
1787                 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
1788                                                intel_dp->compliance.test_lane_count)) {
1789                         index = intel_dp_rate_index(intel_dp->common_rates,
1790                                                     intel_dp->num_common_rates,
1791                                                     intel_dp->compliance.test_link_rate);
1792                         if (index >= 0)
1793                                 min_clock = max_clock = index;
1794                         min_lane_count = max_lane_count = intel_dp->compliance.test_lane_count;
1795                 }
1796         }
1797         DRM_DEBUG_KMS("DP link computation with max lane count %i "
1798                       "max bw %d pixel clock %iKHz\n",
1799                       max_lane_count, intel_dp->common_rates[max_clock],
1800                       adjusted_mode->crtc_clock);
1801
1802         /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1803          * bpc in between. */
1804         bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
1805         if (intel_dp_is_edp(intel_dp)) {
1806
1807                 /* Get bpp from vbt only for panels that dont have bpp in edid */
1808                 if (intel_connector->base.display_info.bpc == 0 &&
1809                         (dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp)) {
1810                         DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1811                                       dev_priv->vbt.edp.bpp);
1812                         bpp = dev_priv->vbt.edp.bpp;
1813                 }
1814
1815                 /*
1816                  * Use the maximum clock and number of lanes the eDP panel
1817                  * advertizes being capable of. The panels are generally
1818                  * designed to support only a single clock and lane
1819                  * configuration, and typically these values correspond to the
1820                  * native resolution of the panel.
1821                  */
1822                 min_lane_count = max_lane_count;
1823                 min_clock = max_clock;
1824         }
1825
1826         for (; bpp >= 6*3; bpp -= 2*3) {
1827                 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1828                                                    bpp);
1829
1830                 for (clock = min_clock; clock <= max_clock; clock++) {
1831                         for (lane_count = min_lane_count;
1832                                 lane_count <= max_lane_count;
1833                                 lane_count <<= 1) {
1834
1835                                 link_clock = intel_dp->common_rates[clock];
1836                                 link_avail = intel_dp_max_data_rate(link_clock,
1837                                                                     lane_count);
1838
1839                                 if (mode_rate <= link_avail) {
1840                                         goto found;
1841                                 }
1842                         }
1843                 }
1844         }
1845
1846         return false;
1847
1848 found:
1849         if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
1850                 /*
1851                  * See:
1852                  * CEA-861-E - 5.1 Default Encoding Parameters
1853                  * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1854                  */
1855                 pipe_config->limited_color_range =
1856                         bpp != 18 &&
1857                         drm_default_rgb_quant_range(adjusted_mode) ==
1858                         HDMI_QUANTIZATION_RANGE_LIMITED;
1859         } else {
1860                 pipe_config->limited_color_range =
1861                         intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
1862         }
1863
1864         pipe_config->lane_count = lane_count;
1865
1866         pipe_config->pipe_bpp = bpp;
1867         pipe_config->port_clock = intel_dp->common_rates[clock];
1868
1869         intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1870                               &link_bw, &rate_select);
1871
1872         DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1873                       link_bw, rate_select, pipe_config->lane_count,
1874                       pipe_config->port_clock, bpp);
1875         DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1876                       mode_rate, link_avail);
1877
1878         intel_link_compute_m_n(bpp, lane_count,
1879                                adjusted_mode->crtc_clock,
1880                                pipe_config->port_clock,
1881                                &pipe_config->dp_m_n,
1882                                reduce_m_n);
1883
1884         if (intel_connector->panel.downclock_mode != NULL &&
1885                 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1886                         pipe_config->has_drrs = true;
1887                         intel_link_compute_m_n(bpp, lane_count,
1888                                 intel_connector->panel.downclock_mode->clock,
1889                                 pipe_config->port_clock,
1890                                 &pipe_config->dp_m2_n2,
1891                                 reduce_m_n);
1892         }
1893
1894         /*
1895          * DPLL0 VCO may need to be adjusted to get the correct
1896          * clock for eDP. This will affect cdclk as well.
1897          */
1898         if (intel_dp_is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) {
1899                 int vco;
1900
1901                 switch (pipe_config->port_clock / 2) {
1902                 case 108000:
1903                 case 216000:
1904                         vco = 8640000;
1905                         break;
1906                 default:
1907                         vco = 8100000;
1908                         break;
1909                 }
1910
1911                 to_intel_atomic_state(pipe_config->base.state)->cdclk.logical.vco = vco;
1912         }
1913
1914         if (!HAS_DDI(dev_priv))
1915                 intel_dp_set_clock(encoder, pipe_config);
1916
1917         intel_psr_compute_config(intel_dp, pipe_config);
1918
1919         return true;
1920 }
1921
1922 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1923                               int link_rate, uint8_t lane_count,
1924                               bool link_mst)
1925 {
1926         intel_dp->link_rate = link_rate;
1927         intel_dp->lane_count = lane_count;
1928         intel_dp->link_mst = link_mst;
1929 }
1930
/*
 * Build the DP port register value in intel_dp->DP for the upcoming
 * modeset, encoding lane count, sync polarity, training state, framing
 * and pipe selection in the layout each platform expects.  Also caches
 * the negotiated link parameters via intel_dp_set_link_params().
 */
static void intel_dp_prepare(struct intel_encoder *encoder,
                             const struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = encoder->port;
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
        const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;

        intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
                                 pipe_config->lane_count,
                                 intel_crtc_has_type(pipe_config,
                                                     INTEL_OUTPUT_DP_MST));

        /*
         * There are four kinds of DP registers:
         *
         *      IBX PCH
         *      SNB CPU
         *      IVB CPU
         *      CPT PCH
         *
         * IBX PCH and CPU are the same for almost everything,
         * except that the CPU DP PLL is configured in this
         * register
         *
         * CPT PCH is quite different, having many bits moved
         * to the TRANS_DP_CTL register instead. That
         * configuration happens (oddly) in ironlake_pch_enable
         */

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

        /* Handle DP bits in common between all three register formats */
        intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);

        /* Split out the IBX/CPU vs CPT settings */

        if (IS_GEN7(dev_priv) && port == PORT_A) {
                /* IVB CPU eDP: CPT-style training bits, pipe in bits 29-30. */
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                intel_dp->DP |= crtc->pipe << 29;
        } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
                /* CPT PCH ports: enhanced framing lives in TRANS_DP_CTL. */
                u32 trans_dp;

                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        trans_dp |= TRANS_DP_ENH_FRAMING;
                else
                        trans_dp &= ~TRANS_DP_ENH_FRAMING;
                I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
        } else {
                /* IBX PCH and CPU register layout. */
                if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
                        intel_dp->DP |= DP_COLOR_RANGE_16_235;

                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF;

                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                if (IS_CHERRYVIEW(dev_priv))
                        intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
                else if (crtc->pipe == PIPE_B)
                        intel_dp->DP |= DP_PIPEB_SELECT;
        }
}
2014
/*
 * Mask/value pairs describing PP_STATUS register states polled by
 * wait_panel_status(): panel fully on, fully off, and idle/ready for a
 * new power cycle.  The literal 0 columns keep the four fields (on bit,
 * sequence, cycle delay, sequence state) visually aligned.
 */
#define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

/* Forward declaration; defined later in this file. */
static void intel_pps_verify_state(struct intel_dp *intel_dp);
2025
/*
 * Poll the panel power sequencer status register until
 * (status & @mask) == @value, or the 5 second timeout expires.
 * A timeout is logged with DRM_ERROR but not propagated to the caller.
 * Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
                                       u32 mask,
                                       u32 value)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        i915_reg_t pp_stat_reg, pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        /* Cross-check software PPS state against the hardware first. */
        intel_pps_verify_state(intel_dp);

        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
                        mask, value,
                        I915_READ(pp_stat_reg),
                        I915_READ(pp_ctrl_reg));

        if (intel_wait_for_register(dev_priv,
                                    pp_stat_reg, mask, value,
                                    5000))
                DRM_ERROR("Panel status timeout: status %08x control %08x\n",
                                I915_READ(pp_stat_reg),
                                I915_READ(pp_ctrl_reg));

        DRM_DEBUG_KMS("Wait complete\n");
}
2054
/* Block until the power sequencer reports the panel fully powered on. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power on\n");
        wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
2060
/* Block until the power sequencer reports the panel fully powered off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power off time\n");
        wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
2066
/*
 * Enforce the panel's minimum power-off duration (t11_t12) before it is
 * powered back on, then wait for the sequencer to reach the idle state.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
        ktime_t panel_power_on_time;
        s64 panel_power_off_duration;

        DRM_DEBUG_KMS("Wait for panel power cycle\n");

        /* take the difference of current time and panel power off time
         * and then make panel wait for t11_t12 if needed. */
        panel_power_on_time = ktime_get_boottime();
        panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

        /* When we disable the VDD override bit last we have to do the manual
         * wait. */
        if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
                wait_remaining_ms_from_jiffies(jiffies,
                                       intel_dp->panel_power_cycle_delay - panel_power_off_duration);

        wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
2087
/* Honor the panel's power-on-to-backlight delay before enabling it. */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
                                       intel_dp->backlight_on_delay);
}
2093
/* Honor the panel's backlight-off delay before further power changes. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
        wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
                                       intel_dp->backlight_off_delay);
}
2099
/* Read the current pp_control value, unlocking the register if it
 * is locked.
 *
 * Only the returned value has the unlock key patched in; the register
 * itself is not written here, so the unlock takes effect when the
 * caller writes the value back.  On non-DDI platforms a locked
 * register indicates a PPS setup problem, hence the WARN_ON.
 */

static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        u32 control;

        lockdep_assert_held(&dev_priv->pps_mutex);

        control = I915_READ(_pp_ctrl_reg(intel_dp));
        if (WARN_ON(!HAS_DDI(dev_priv) &&
                    (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
                control &= ~PANEL_UNLOCK_MASK;
                control |= PANEL_UNLOCK_REGS;
        }
        return control;
}
2119
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Forces panel VDD on so the AUX channel can be used, taking a
 * reference on the AUX power domain when VDD was not already up.
 * Returns true when the caller is responsible for disabling VDD again
 * (i.e. VDD was not already requested before this call).
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        u32 pp;
        i915_reg_t pp_stat_reg, pp_ctrl_reg;
        bool need_to_disable = !intel_dp->want_panel_vdd;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!intel_dp_is_edp(intel_dp))
                return false;

        /* A pending deferred VDD-off must not fire while we want VDD. */
        cancel_delayed_work(&intel_dp->panel_vdd_work);
        intel_dp->want_panel_vdd = true;

        /* VDD already up: nothing to program, keep existing power ref. */
        if (edp_have_panel_vdd(intel_dp))
                return need_to_disable;

        intel_display_power_get(dev_priv, intel_dp->aux_power_domain);

        DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
                      port_name(intel_dig_port->base.port));

        if (!edp_have_panel_power(intel_dp))
                wait_panel_power_cycle(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_FORCE_VDD;

        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
                        I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
        /*
         * If the panel wasn't on, delay before accessing aux channel
         */
        if (!edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
                              port_name(intel_dig_port->base.port));
                msleep(intel_dp->panel_power_up_delay);
        }

        return need_to_disable;
}
2173
/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 *
 * Locked wrapper around edp_panel_vdd_on(); warns if VDD was already
 * requested, which indicates unbalanced on/off calls.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
        bool vdd;

        if (!intel_dp_is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        vdd = edp_panel_vdd_on(intel_dp);
        pps_unlock(intel_dp);

        I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
             port_name(dp_to_dig_port(intel_dp)->base.port));
}
2195
/*
 * Immediately turn off the forced panel VDD and drop the AUX power
 * domain reference taken by edp_panel_vdd_on().  Records the power-off
 * timestamp when the panel itself is also off, so the next power-on can
 * honor the power cycle delay.  Caller must hold pps_mutex and must
 * not still want VDD (see WARN_ON).
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        struct intel_digital_port *intel_dig_port =
                dp_to_dig_port(intel_dp);
        u32 pp;
        i915_reg_t pp_stat_reg, pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        WARN_ON(intel_dp->want_panel_vdd);

        if (!edp_have_panel_vdd(intel_dp))
                return;

        DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
                      port_name(intel_dig_port->base.port));

        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_FORCE_VDD;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp_stat_reg = _pp_stat_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        /* Make sure sequencer is idle before allowing subsequent activity */
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
        I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

        if ((pp & PANEL_POWER_ON) == 0)
                intel_dp->panel_power_off_time = ktime_get_boottime();

        /* Drop the reference taken when VDD was forced on. */
        intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
}
2232
/*
 * Delayed-work callback scheduled by edp_panel_vdd_schedule_off():
 * drops VDD unless someone re-requested it in the meantime.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
        struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
                                                 struct intel_dp, panel_vdd_work);

        pps_lock(intel_dp);
        if (!intel_dp->want_panel_vdd)
                edp_panel_vdd_off_sync(intel_dp);
        pps_unlock(intel_dp);
}
2243
/* Schedule a deferred VDD off via edp_panel_vdd_work(). */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
        unsigned long delay;

        /*
         * Queue the timer to fire a long time from now (relative to the power
         * down delay) to keep the panel power up across a sequence of
         * operations.
         */
        delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
        schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
2256
/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * @sync: true to turn VDD off immediately, false to defer it via
 * delayed work so back-to-back AUX transactions keep VDD up.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!intel_dp_is_edp(intel_dp))
                return;

        I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
             port_name(dp_to_dig_port(intel_dp)->base.port));

        intel_dp->want_panel_vdd = false;

        if (sync)
                edp_panel_vdd_off_sync(intel_dp);
        else
                edp_panel_vdd_schedule_off(intel_dp);
}
2281
/*
 * Turn the eDP panel power on through the power sequencer and wait for
 * it to come up.  No-op for non-eDP; warns (and bails) if the panel is
 * already powered.  Caller must hold pps_mutex.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        u32 pp;
        i915_reg_t pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!intel_dp_is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
                      port_name(dp_to_dig_port(intel_dp)->base.port));

        if (WARN(edp_have_panel_power(intel_dp),
                 "eDP port %c panel power already on\n",
                 port_name(dp_to_dig_port(intel_dp)->base.port)))
                return;

        /* Respect the panel's minimum off time before powering back on. */
        wait_panel_power_cycle(intel_dp);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        if (IS_GEN5(dev_priv)) {
                /* ILK workaround: disable reset around power sequence */
                pp &= ~PANEL_POWER_RESET;
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }

        pp |= PANEL_POWER_ON;
        if (!IS_GEN5(dev_priv))
                pp |= PANEL_POWER_RESET;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        wait_panel_on(intel_dp);
        /* Timestamp for the backlight-on delay (wait_backlight_on()). */
        intel_dp->last_power_on = jiffies;

        if (IS_GEN5(dev_priv)) {
                pp |= PANEL_POWER_RESET; /* restore panel reset bit */
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
        }
}
2328
/* Locked wrapper around edp_panel_on(); no-op for non-eDP. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
        if (!intel_dp_is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_on(intel_dp);
        pps_unlock(intel_dp);
}
2338
2339
/*
 * Turn the eDP panel power off through the power sequencer, wait for it
 * to go down, and drop the display power reference acquired when VDD
 * was enabled.  Requires VDD to be forced on (see WARN).  Caller must
 * hold pps_mutex.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        u32 pp;
        i915_reg_t pp_ctrl_reg;

        lockdep_assert_held(&dev_priv->pps_mutex);

        if (!intel_dp_is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
                      port_name(dp_to_dig_port(intel_dp)->base.port));

        WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
             port_name(dp_to_dig_port(intel_dp)->base.port));

        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, for otherwise some
         * panels get very unhappy and cease to work. */
        pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
                EDP_BLC_ENABLE);

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        intel_dp->want_panel_vdd = false;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        wait_panel_off(intel_dp);
        /* Timestamp for the t11_t12 wait in wait_panel_power_cycle(). */
        intel_dp->panel_power_off_time = ktime_get_boottime();

        /* We got a reference when we enabled the VDD. */
        intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
}
2376
/* Locked wrapper around edp_panel_off(); no-op for non-eDP. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
        if (!intel_dp_is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_off(intel_dp);
        pps_unlock(intel_dp);
}
2386
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        u32 pp;
        i915_reg_t pp_ctrl_reg;

        /*
         * If we enable the backlight right away following a panel power
         * on, we may see slight flicker as the panel syncs with the eDP
         * link.  So delay a bit to make sure the image is solid before
         * allowing it to appear.
         */
        wait_backlight_on(intel_dp);

        pps_lock(intel_dp);

        /* Set the backlight-enable bit in the PPS control register. */
        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_BLC_ENABLE;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        pps_unlock(intel_dp);
}
2414
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
                            const struct drm_connector_state *conn_state)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);

        if (!intel_dp_is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");

        /* PWM first, then the PPS backlight enable bit. */
        intel_panel_enable_backlight(crtc_state, conn_state);
        _intel_edp_backlight_on(intel_dp);
}
2429
/*
 * Disable backlight in the panel power control.
 *
 * Clears EDP_BLC_ENABLE in the panel power control register under the pps
 * lock, then records the disable timestamp and waits out the panel's
 * required backlight-off delay before returning.
 */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        u32 pp;
        i915_reg_t pp_ctrl_reg;

        if (!intel_dp_is_edp(intel_dp))
                return;

        pps_lock(intel_dp);

        /* Read-modify-write of the PP control register under the pps lock */
        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_BLC_ENABLE;

        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        pps_unlock(intel_dp);

        /* Record when the backlight went off so the next enable can honour
         * the panel's mandatory backlight-off delay. */
        intel_dp->last_backlight_off = jiffies;
        edp_wait_backlight_off(intel_dp);
}
2455
2456 /* Disable backlight PP control and backlight PWM. */
2457 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
2458 {
2459         struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
2460
2461         if (!intel_dp_is_edp(intel_dp))
2462                 return;
2463
2464         DRM_DEBUG_KMS("\n");
2465
2466         _intel_edp_backlight_off(intel_dp);
2467         intel_panel_disable_backlight(old_conn_state);
2468 }
2469
2470 /*
2471  * Hook for controlling the panel power control backlight through the bl_power
2472  * sysfs attribute. Take care to handle multiple calls.
2473  */
2474 static void intel_edp_backlight_power(struct intel_connector *connector,
2475                                       bool enable)
2476 {
2477         struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2478         bool is_enabled;
2479
2480         pps_lock(intel_dp);
2481         is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2482         pps_unlock(intel_dp);
2483
2484         if (is_enabled == enable)
2485                 return;
2486
2487         DRM_DEBUG_KMS("panel power control backlight %s\n",
2488                       enable ? "enable" : "disable");
2489
2490         if (enable)
2491                 _intel_edp_backlight_on(intel_dp);
2492         else
2493                 _intel_edp_backlight_off(intel_dp);
2494 }
2495
2496 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2497 {
2498         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2499         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2500         bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2501
2502         I915_STATE_WARN(cur_state != state,
2503                         "DP port %c state assertion failure (expected %s, current %s)\n",
2504                         port_name(dig_port->base.port),
2505                         onoff(state), onoff(cur_state));
2506 }
2507 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2508
2509 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2510 {
2511         bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2512
2513         I915_STATE_WARN(cur_state != state,
2514                         "eDP PLL state assertion failure (expected %s, current %s)\n",
2515                         onoff(state), onoff(cur_state));
2516 }
2517 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2518 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2519
/*
 * Enable the eDP PLL on port A (ILK/SNB/IVB style).
 *
 * Programs the PLL frequency select in DP_A first, then enables the PLL
 * with a second write. Must be called with the pipe disabled, the DP port
 * disabled and the PLL currently off (asserted below).
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
                                const struct intel_crtc_state *pipe_config)
{
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        assert_pipe_disabled(dev_priv, crtc->pipe);
        assert_dp_port_disabled(intel_dp);
        assert_edp_pll_disabled(dev_priv);

        DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
                      pipe_config->port_clock);

        /* Select the PLL frequency before enabling the PLL */
        intel_dp->DP &= ~DP_PLL_FREQ_MASK;

        if (pipe_config->port_clock == 162000)
                intel_dp->DP |= DP_PLL_FREQ_162MHZ;
        else
                intel_dp->DP |= DP_PLL_FREQ_270MHZ;

        I915_WRITE(DP_A, intel_dp->DP);
        POSTING_READ(DP_A);
        /* Let the frequency select settle before flipping the enable bit */
        udelay(500);

        /*
         * [DevILK] Work around required when enabling DP PLL
         * while a pipe is enabled going to FDI:
         * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
         * 2. Program DP PLL enable
         */
        if (IS_GEN5(dev_priv))
                intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);

        intel_dp->DP |= DP_PLL_ENABLE;

        I915_WRITE(DP_A, intel_dp->DP);
        POSTING_READ(DP_A);
        /* Give the PLL time to lock */
        udelay(200);
}
2559
/*
 * Disable the eDP PLL on port A.
 *
 * Counterpart to ironlake_edp_pll_on(): requires the pipe and DP port to
 * already be disabled, and the PLL to currently be enabled.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
                                 const struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        assert_pipe_disabled(dev_priv, crtc->pipe);
        assert_dp_port_disabled(intel_dp);
        assert_edp_pll_enabled(dev_priv);

        DRM_DEBUG_KMS("disabling eDP PLL\n");

        intel_dp->DP &= ~DP_PLL_ENABLE;

        I915_WRITE(DP_A, intel_dp->DP);
        POSTING_READ(DP_A);
        /* Allow the PLL to spin down */
        udelay(200);
}
2578
2579 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
2580 {
2581         /*
2582          * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
2583          * be capable of signalling downstream hpd with a long pulse.
2584          * Whether or not that means D3 is safe to use is not clear,
2585          * but let's assume so until proven otherwise.
2586          *
2587          * FIXME should really check all downstream ports...
2588          */
2589         return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
2590                 intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
2591                 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
2592 }
2593
/*
 * If the sink supports it, try to set the power state appropriately.
 *
 * Writes DP_SET_POWER over AUX: D3 when turning off (unless the branch
 * device needs D0 for downstream HPD), D0 with retries when turning on.
 * Requires a valid DPCD (rev >= 1.1); silently returns otherwise.
 */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
        int ret, i;

        /* Should have a valid DPCD by this point */
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
                return;

        if (mode != DRM_MODE_DPMS_ON) {
                /* Keep the sink in D0 if downstream HPD depends on it */
                if (downstream_hpd_needs_d0(intel_dp))
                        return;

                ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                         DP_SET_POWER_D3);
        } else {
                struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

                /*
                 * When turning on, we need to retry for 1ms to give the sink
                 * time to wake up.
                 */
                for (i = 0; i < 3; i++) {
                        ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
                                                 DP_SET_POWER_D0);
                        /* drm_dp_dpcd_writeb() returns 1 on success */
                        if (ret == 1)
                                break;
                        msleep(1);
                }

                /* LSPCON adaptors need to settle back into PCON mode */
                if (ret == 1 && lspcon->active)
                        lspcon_wait_pcon_mode(lspcon);
        }

        if (ret != 1)
                DRM_DEBUG_KMS("failed to %s sink power state\n",
                              mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
2632
/*
 * Read back whether the DP port is enabled in hardware, and if so which
 * pipe it is driving (returned via @pipe).
 *
 * Takes a runtime-pm power domain reference for the duration of the
 * register reads; returns false if the power domain is not enabled.
 * Pipe lookup differs per platform: GEN7 port A and CPT use dedicated
 * encodings, CHV has its own field, everything else uses PORT_TO_PIPE.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
                                  enum pipe *pipe)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = encoder->port;
        u32 tmp;
        bool ret;

        /* Bail if the hw is powered down; also grabs a wakeref on success */
        if (!intel_display_power_get_if_enabled(dev_priv,
                                                encoder->power_domain))
                return false;

        ret = false;

        tmp = I915_READ(intel_dp->output_reg);

        if (!(tmp & DP_PORT_EN))
                goto out;

        if (IS_GEN7(dev_priv) && port == PORT_A) {
                *pipe = PORT_TO_PIPE_CPT(tmp);
        } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
                enum pipe p;

                /* On CPT the pipe->port routing lives in the transcoder regs */
                for_each_pipe(dev_priv, p) {
                        u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
                        if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
                                *pipe = p;
                                ret = true;

                                goto out;
                        }
                }

                DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
                              i915_mmio_reg_offset(intel_dp->output_reg));
        } else if (IS_CHERRYVIEW(dev_priv)) {
                *pipe = DP_PORT_TO_PIPE_CHV(tmp);
        } else {
                *pipe = PORT_TO_PIPE(tmp);
        }

        ret = true;

out:
        /* Drop the wakeref taken above */
        intel_display_power_put(dev_priv, encoder->power_domain);

        return ret;
}
2683
/*
 * Read the current DP/eDP configuration back from the hardware into
 * @pipe_config (used for state readout/verification and BIOS takeover).
 *
 * Decodes sync polarity, audio enable, lane count, M/N values and (for
 * port A) the eDP PLL frequency, then derives the adjusted mode clock.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
                                struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        u32 tmp, flags = 0;
        enum port port = encoder->port;
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);

        if (encoder->type == INTEL_OUTPUT_EDP)
                pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
        else
                pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);

        tmp = I915_READ(intel_dp->output_reg);

        /* Port A never carries audio */
        pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

        if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
                /* On CPT the sync polarity lives in the transcoder register */
                u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

                if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        } else {
                if (tmp & DP_SYNC_HS_HIGH)
                        flags |= DRM_MODE_FLAG_PHSYNC;
                else
                        flags |= DRM_MODE_FLAG_NHSYNC;

                if (tmp & DP_SYNC_VS_HIGH)
                        flags |= DRM_MODE_FLAG_PVSYNC;
                else
                        flags |= DRM_MODE_FLAG_NVSYNC;
        }

        pipe_config->base.adjusted_mode.flags |= flags;

        if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
                pipe_config->limited_color_range = true;

        pipe_config->lane_count =
                ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

        intel_dp_get_m_n(crtc, pipe_config);

        if (port == PORT_A) {
                /* Port clock comes from the eDP PLL frequency select */
                if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
                        pipe_config->port_clock = 162000;
                else
                        pipe_config->port_clock = 270000;
        }

        pipe_config->base.adjusted_mode.crtc_clock =
                intel_dotclock_calculate(pipe_config->port_clock,
                                         &pipe_config->dp_m_n);

        if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
            pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
                /*
                 * This is a big fat ugly hack.
                 *
                 * Some machines in UEFI boot mode provide us a VBT that has 18
                 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
                 * unknown we fail to light up. Yet the same BIOS boots up with
                 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
                 * max, not what it tells us to use.
                 *
                 * Note: This will still be broken if the eDP panel is not lit
                 * up by the BIOS, and thus we can't get the mode at module
                 * load.
                 */
                DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
                              pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
                dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
        }
}
2767
/*
 * Common DP disable: shut down audio, then power down the panel in the
 * required order (backlight off -> sink to D3 -> panel off), holding vdd
 * across the panel power transition.
 */
static void intel_disable_dp(struct intel_encoder *encoder,
                             const struct intel_crtc_state *old_crtc_state,
                             const struct drm_connector_state *old_conn_state)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

        if (old_crtc_state->has_audio)
                intel_audio_codec_disable(encoder,
                                          old_crtc_state, old_conn_state);

        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
        intel_edp_panel_vdd_on(intel_dp);
        intel_edp_backlight_off(old_conn_state);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
        intel_edp_panel_off(intel_dp);
}
2785
/*
 * G4x disable hook: common disable, then take the link down immediately
 * since g4x requires the port to be disabled before the pipe.
 */
static void g4x_disable_dp(struct intel_encoder *encoder,
                           const struct intel_crtc_state *old_crtc_state,
                           const struct drm_connector_state *old_conn_state)
{
        intel_disable_dp(encoder, old_crtc_state, old_conn_state);

        /* disable the port before the pipe on g4x */
        intel_dp_link_down(encoder, old_crtc_state);
}
2795
/*
 * ILK+ disable hook: only the common disable here; the link is taken down
 * after the pipe, in ilk_post_disable_dp().
 */
static void ilk_disable_dp(struct intel_encoder *encoder,
                           const struct intel_crtc_state *old_crtc_state,
                           const struct drm_connector_state *old_conn_state)
{
        intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
2802
/*
 * VLV/CHV disable hook: tear down PSR before the common disable sequence.
 */
static void vlv_disable_dp(struct intel_encoder *encoder,
                           const struct intel_crtc_state *old_crtc_state,
                           const struct drm_connector_state *old_conn_state)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

        intel_psr_disable(intel_dp, old_crtc_state);

        intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
2813
/*
 * ILK+ post-disable hook: take the link down after the pipe is off, and
 * shut down the eDP PLL for port A.
 */
static void ilk_post_disable_dp(struct intel_encoder *encoder,
                                const struct intel_crtc_state *old_crtc_state,
                                const struct drm_connector_state *old_conn_state)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = encoder->port;

        intel_dp_link_down(encoder, old_crtc_state);

        /* Only ilk+ has port A */
        if (port == PORT_A)
                ironlake_edp_pll_off(intel_dp, old_crtc_state);
}
2827
/* VLV post-disable hook: take the link down after the pipe is off. */
static void vlv_post_disable_dp(struct intel_encoder *encoder,
                                const struct intel_crtc_state *old_crtc_state,
                                const struct drm_connector_state *old_conn_state)
{
        intel_dp_link_down(encoder, old_crtc_state);
}
2834
/*
 * CHV post-disable hook: take the link down, then put the PHY data lanes
 * into reset via the sideband interface (under sb_lock).
 */
static void chv_post_disable_dp(struct intel_encoder *encoder,
                                const struct intel_crtc_state *old_crtc_state,
                                const struct drm_connector_state *old_conn_state)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

        intel_dp_link_down(encoder, old_crtc_state);

        mutex_lock(&dev_priv->sb_lock);

        /* Assert data lane reset */
        chv_data_lane_soft_reset(encoder, old_crtc_state, true);

        mutex_unlock(&dev_priv->sb_lock);
}
2850
/*
 * Program the requested link training pattern into the source.
 *
 * Three hardware generations are handled:
 *  - DDI (HSW+): pattern goes into DP_TP_CTL, written here directly;
 *  - GEN7 port A / CPT PCH ports: pattern is encoded into the port
 *    register value via *DP (caller writes it out);
 *  - everything else (g4x/VLV/CHV): likewise encoded into *DP.
 * TPS3 falls back to TPS2 on hardware that lacks it.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
                         uint32_t *DP,
                         uint8_t dp_train_pat)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->base.port;

        if (dp_train_pat & DP_TRAINING_PATTERN_MASK)
                DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
                              dp_train_pat & DP_TRAINING_PATTERN_MASK);

        if (HAS_DDI(dev_priv)) {
                /* DDI: training pattern lives in the DP_TP_CTL register */
                uint32_t temp = I915_READ(DP_TP_CTL(port));

                if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
                        temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
                else
                        temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

                        break;
                case DP_TRAINING_PATTERN_1:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
                        break;
                }
                I915_WRITE(DP_TP_CTL(port), temp);

        } else if ((IS_GEN7(dev_priv) && port == PORT_A) ||
                   (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
                /* GEN7 port A / CPT: pattern encoded in the port register */
                *DP &= ~DP_LINK_TRAIN_MASK_CPT;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF_CPT;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1_CPT;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                case DP_TRAINING_PATTERN_3:
                        DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
                        *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                }

        } else {
                /* g4x/VLV/CHV: pattern encoded in the port register */
                if (IS_CHERRYVIEW(dev_priv))
                        *DP &= ~DP_LINK_TRAIN_MASK_CHV;
                else
                        *DP &= ~DP_LINK_TRAIN_MASK;

                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
                        *DP |= DP_LINK_TRAIN_OFF;
                        break;
                case DP_TRAINING_PATTERN_1:
                        *DP |= DP_LINK_TRAIN_PAT_1;
                        break;
                case DP_TRAINING_PATTERN_2:
                        *DP |= DP_LINK_TRAIN_PAT_2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        /* Only CHV supports TPS3 in this register layout */
                        if (IS_CHERRYVIEW(dev_priv)) {
                                *DP |= DP_LINK_TRAIN_PAT_3_CHV;
                        } else {
                                DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
                                *DP |= DP_LINK_TRAIN_PAT_2;
                        }
                        break;
                }
        }
}
2937
/*
 * Write the port register to enable the DP port, with training pattern 1
 * already selected as the spec requires.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp,
                                 const struct intel_crtc_state *old_crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

        /* enable with pattern 1 (as per spec) */

        intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);

        /*
         * Magic for VLV/CHV. We _must_ first set up the register
         * without actually enabling the port, and then do another
         * write to enable the port. Otherwise link training will
         * fail when the power sequencer is freshly used for this port.
         */
        intel_dp->DP |= DP_PORT_EN;
        if (old_crtc_state->has_audio)
                intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

        I915_WRITE(intel_dp->output_reg, intel_dp->DP);
        POSTING_READ(intel_dp->output_reg);
}
2960
/*
 * Common DP enable: bring up the port, power the panel on, train the link
 * and enable audio.
 *
 * Ordering is strict: the pps lock is held across power sequencer init,
 * port enable and panel power-on; link training only starts after the
 * VLV/CHV PHY lanes report ready.
 */
static void intel_enable_dp(struct intel_encoder *encoder,
                            const struct intel_crtc_state *pipe_config,
                            const struct drm_connector_state *conn_state)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);
        enum pipe pipe = crtc->pipe;

        /* The port must not already be enabled at this point */
        if (WARN_ON(dp_reg & DP_PORT_EN))
                return;

        pps_lock(intel_dp);

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                vlv_init_panel_power_sequencer(encoder, pipe_config);

        intel_dp_enable_port(intel_dp, pipe_config);

        /* Power the panel on while holding a vdd reference */
        edp_panel_vdd_on(intel_dp);
        edp_panel_on(intel_dp);
        edp_panel_vdd_off(intel_dp, true);

        pps_unlock(intel_dp);

        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                unsigned int lane_mask = 0x0;

                if (IS_CHERRYVIEW(dev_priv))
                        lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);

                /* Wait for the PHY lanes before starting link training */
                vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
                                    lane_mask);
        }

        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);

        if (pipe_config->has_audio) {
                DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
                                 pipe_name(pipe));
                intel_audio_codec_enable(encoder, pipe_config, conn_state);
        }
}
3007
/* G4x/ILK enable hook: common enable followed by backlight on. */
static void g4x_enable_dp(struct intel_encoder *encoder,
                          const struct intel_crtc_state *pipe_config,
                          const struct drm_connector_state *conn_state)
{
        intel_enable_dp(encoder, pipe_config, conn_state);
        intel_edp_backlight_on(pipe_config, conn_state);
}
3015
/*
 * VLV/CHV enable hook. The port itself was already enabled by
 * intel_enable_dp() from the pre_enable hook (see vlv_pre_enable_dp()),
 * so only backlight and PSR remain here.
 */
static void vlv_enable_dp(struct intel_encoder *encoder,
                          const struct intel_crtc_state *pipe_config,
                          const struct drm_connector_state *conn_state)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

        intel_edp_backlight_on(pipe_config, conn_state);
        intel_psr_enable(intel_dp, pipe_config);
}
3025
/*
 * G4x/ILK pre-enable hook: program the port register and, for port A,
 * spin up the eDP PLL before the pipe is enabled.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder,
                              const struct intel_crtc_state *pipe_config,
                              const struct drm_connector_state *conn_state)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = encoder->port;

        intel_dp_prepare(encoder, pipe_config);

        /* Only ilk+ has port A */
        if (port == PORT_A)
                ironlake_edp_pll_on(intel_dp, pipe_config);
}
3039
/*
 * Logically disconnect the power sequencer currently assigned to this
 * (e)DP port: sync vdd off, clear the pipe's PP_ON_DELAYS port select,
 * and mark pps_pipe invalid.
 *
 * Must be called with the port inactive (active_pipe == INVALID_PIPE).
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
        enum pipe pipe = intel_dp->pps_pipe;
        i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);

        WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

        /* Only pipes A and B have power sequencers on VLV/CHV */
        if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
                return;

        /* Make sure vdd is really off before releasing the sequencer */
        edp_panel_vdd_off_sync(intel_dp);

        /*
         * VLV seems to get confused when multiple power sequencers
         * have the same port selected (even if only one has power/vdd
         * enabled). The failure manifests as vlv_wait_port_ready() failing
         * CHV on the other hand doesn't seem to mind having the same port
         * selected in multiple power sequencers, but let's clear the
         * port select always when logically disconnecting a power sequencer
         * from a port.
         */
        DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
                      pipe_name(pipe), port_name(intel_dig_port->base.port));
        I915_WRITE(pp_on_reg, 0);
        POSTING_READ(pp_on_reg);

        intel_dp->pps_pipe = INVALID_PIPE;
}
3070
/*
 * Steal the power sequencer of @pipe from whatever (e)DP encoder is
 * currently using it, detaching it cleanly (vdd off) first.
 *
 * Caller must hold pps_mutex. WARNs if the sequencer is being stolen
 * from a port that is still active.
 */
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
{
        struct intel_encoder *encoder;

        lockdep_assert_held(&dev_priv->pps_mutex);

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                struct intel_dp *intel_dp;
                enum port port;

                /* Only DP/eDP encoders can own a power sequencer */
                if (encoder->type != INTEL_OUTPUT_DP &&
                    encoder->type != INTEL_OUTPUT_EDP)
                        continue;

                intel_dp = enc_to_intel_dp(&encoder->base);
                port = dp_to_dig_port(intel_dp)->base.port;

                WARN(intel_dp->active_pipe == pipe,
                     "stealing pipe %c power sequencer from active (e)DP port %c\n",
                     pipe_name(pipe), port_name(port));

                if (intel_dp->pps_pipe != pipe)
                        continue;

                DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
                              pipe_name(pipe), port_name(port));

                /* make sure vdd is off before we steal it */
                vlv_detach_power_sequencer(intel_dp);
        }
}
3103
/*
 * Associate the power sequencer of the CRTC's pipe with this port:
 * release any previously-used sequencer, steal the target pipe's
 * sequencer from other ports, and (for eDP) program it for this panel.
 *
 * Caller must hold pps_mutex (asserted via lockdep).
 */
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
                                           const struct intel_crtc_state *crtc_state)
{
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

        lockdep_assert_held(&dev_priv->pps_mutex);

        WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

        if (intel_dp->pps_pipe != INVALID_PIPE &&
            intel_dp->pps_pipe != crtc->pipe) {
                /*
                 * If another power sequencer was being used on this
                 * port previously make sure to turn off vdd there while
                 * we still have control of it.
                 */
                vlv_detach_power_sequencer(intel_dp);
        }

        /*
         * We may be stealing the power
         * sequencer from another port.
         */
        vlv_steal_power_sequencer(dev_priv, crtc->pipe);

        intel_dp->active_pipe = crtc->pipe;

        /* Only eDP actually needs a power sequencer assignment */
        if (!intel_dp_is_edp(intel_dp))
                return;

        /* now it's all ours */
        intel_dp->pps_pipe = crtc->pipe;

        DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
                      pipe_name(intel_dp->pps_pipe), port_name(encoder->port));

        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(intel_dp);
        intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
}
3146
/*
 * VLV pre-enable hook: program the PHY, then run the full common enable
 * (on VLV/CHV the port is brought up at pre_enable time).
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder,
                              const struct intel_crtc_state *pipe_config,
                              const struct drm_connector_state *conn_state)
{
        vlv_phy_pre_encoder_enable(encoder, pipe_config);

        intel_enable_dp(encoder, pipe_config, conn_state);
}
3155
/*
 * VLV pre-PLL-enable hook: program the port register and prepare the PHY
 * before the PLL is enabled.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
                                  const struct intel_crtc_state *pipe_config,
                                  const struct drm_connector_state *conn_state)
{
        intel_dp_prepare(encoder, pipe_config);

        vlv_phy_pre_pll_enable(encoder, pipe_config);
}
3164
/*
 * CHV pre-enable hook: program the PHY, run the common enable, then drop
 * the second common lane override which is no longer needed.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder,
                              const struct intel_crtc_state *pipe_config,
                              const struct drm_connector_state *conn_state)
{
        chv_phy_pre_encoder_enable(encoder, pipe_config);

        intel_enable_dp(encoder, pipe_config, conn_state);

        /* Second common lane will stay alive on its own now */
        chv_phy_release_cl2_override(encoder);
}
3176
/*
 * CHV pre-PLL-enable hook: program the port register and prepare the PHY
 * before the PLL is enabled.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
                                  const struct intel_crtc_state *pipe_config,
                                  const struct drm_connector_state *conn_state)
{
        intel_dp_prepare(encoder, pipe_config);

        chv_phy_pre_pll_enable(encoder, pipe_config);
}
3185
/* CHV post-PLL-disable hook: finish PHY teardown after the PLL is off. */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
                                    const struct intel_crtc_state *old_crtc_state,
                                    const struct drm_connector_state *old_conn_state)
{
        chv_phy_post_pll_disable(encoder, old_crtc_state);
}
3192
/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information.
 *
 * Returns true only if all DP_LINK_STATUS_SIZE bytes were transferred;
 * a short or failed AUX read yields false.
 */
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
				DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
3203
3204 /* These are source-specific values. */
3205 uint8_t
3206 intel_dp_voltage_max(struct intel_dp *intel_dp)
3207 {
3208         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
3209         enum port port = dp_to_dig_port(intel_dp)->base.port;
3210
3211         if (INTEL_GEN(dev_priv) >= 9) {
3212                 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3213                 return intel_ddi_dp_voltage_max(encoder);
3214         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3215                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3216         else if (IS_GEN7(dev_priv) && port == PORT_A)
3217                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3218         else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3219                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3220         else
3221                 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3222 }
3223
3224 uint8_t
3225 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3226 {
3227         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
3228         enum port port = dp_to_dig_port(intel_dp)->base.port;
3229
3230         if (INTEL_GEN(dev_priv) >= 9) {
3231                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3232                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3233                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
3234                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3235                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3236                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3237                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3238                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3239                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3240                 default:
3241                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3242                 }
3243         } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3244                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3245                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3246                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
3247                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3248                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3249                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3250                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3251                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3252                 default:
3253                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3254                 }
3255         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3256                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3257                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3258                         return DP_TRAIN_PRE_EMPH_LEVEL_3;
3259                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3260                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3261                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3262                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3263                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3264                 default:
3265                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3266                 }
3267         } else if (IS_GEN7(dev_priv) && port == PORT_A) {
3268                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3269                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3270                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3271                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3272                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3273                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3274                 default:
3275                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3276                 }
3277         } else {
3278                 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3279                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3280                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3281                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3282                         return DP_TRAIN_PRE_EMPH_LEVEL_2;
3283                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3284                         return DP_TRAIN_PRE_EMPH_LEVEL_1;
3285                 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3286                 default:
3287                         return DP_TRAIN_PRE_EMPH_LEVEL_0;
3288                 }
3289         }
3290 }
3291
/*
 * Program the VLV DPIO PHY de-emphasis/pre-emphasis/swing registers for
 * the current train_set[0]. Always returns 0: on VLV the signal levels
 * live entirely in the PHY, so no bits are merged into the DP register.
 *
 * The hex values are opaque DPIO register settings; presumably taken
 * from the VLV PHY programming tables — do not derive meaning from the
 * individual fields here.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			/* unsupported swing/pre-emphasis combination */
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
				 uniqtranscale_reg_value, 0);

	return 0;
}
3377
/*
 * Program the CHV DPIO PHY de-emphasis/margin registers for the current
 * train_set[0]. Always returns 0: on CHV the signal levels live entirely
 * in the PHY, so no bits are merged into the DP register.
 *
 * deemph/margin values are opaque PHY register settings; presumably from
 * the CHV PHY programming tables — confirm against BSpec before changing.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u32 deemph_reg_value, margin_reg_value;
	bool uniq_trans_scale = false;
	uint8_t train_set = intel_dp->train_set[0];

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* max swing needs the unique transition scale */
			uniq_trans_scale = true;
			break;
		default:
			/* unsupported swing/pre-emphasis combination */
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	chv_set_phy_signal_level(encoder, deemph_reg_value,
				 margin_reg_value, uniq_trans_scale);

	return 0;
}
3460
3461 static uint32_t
3462 gen4_signal_levels(uint8_t train_set)
3463 {
3464         uint32_t        signal_levels = 0;
3465
3466         switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3467         case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3468         default:
3469                 signal_levels |= DP_VOLTAGE_0_4;
3470                 break;
3471         case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3472                 signal_levels |= DP_VOLTAGE_0_6;
3473                 break;
3474         case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3475                 signal_levels |= DP_VOLTAGE_0_8;
3476                 break;
3477         case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3478                 signal_levels |= DP_VOLTAGE_1_2;
3479                 break;
3480         }
3481         switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3482         case DP_TRAIN_PRE_EMPH_LEVEL_0:
3483         default:
3484                 signal_levels |= DP_PRE_EMPHASIS_0;
3485                 break;
3486         case DP_TRAIN_PRE_EMPH_LEVEL_1:
3487                 signal_levels |= DP_PRE_EMPHASIS_3_5;
3488                 break;
3489         case DP_TRAIN_PRE_EMPH_LEVEL_2:
3490                 signal_levels |= DP_PRE_EMPHASIS_6;
3491                 break;
3492         case DP_TRAIN_PRE_EMPH_LEVEL_3:
3493                 signal_levels |= DP_PRE_EMPHASIS_9_5;
3494                 break;
3495         }
3496         return signal_levels;
3497 }
3498
/* Gen6's DP voltage swing and pre-emphasis control */
static uint32_t
gen6_edp_signal_levels(uint8_t train_set)
{
	/* Combine swing and pre-emphasis so one switch covers both. */
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		/* Unsupported combo: warn and use the lowest drive strength. */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
3526
/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
gen7_edp_signal_levels(uint8_t train_set)
{
	/* Combine swing and pre-emphasis so one switch covers both. */
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		/*
		 * Unsupported combo: fall back to 500MV/0dB, a value not
		 * reachable via any case above — presumably intentional as
		 * a middle-of-the-road drive strength; verify against BSpec.
		 */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
3557
3558 void
3559 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3560 {
3561         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
3562         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3563         enum port port = intel_dig_port->base.port;
3564         uint32_t signal_levels, mask = 0;
3565         uint8_t train_set = intel_dp->train_set[0];
3566
3567         if (IS_GEN9_LP(dev_priv) || IS_CANNONLAKE(dev_priv)) {
3568                 signal_levels = bxt_signal_levels(intel_dp);
3569         } else if (HAS_DDI(dev_priv)) {
3570                 signal_levels = ddi_signal_levels(intel_dp);
3571                 mask = DDI_BUF_EMP_MASK;
3572         } else if (IS_CHERRYVIEW(dev_priv)) {
3573                 signal_levels = chv_signal_levels(intel_dp);
3574         } else if (IS_VALLEYVIEW(dev_priv)) {
3575                 signal_levels = vlv_signal_levels(intel_dp);
3576         } else if (IS_GEN7(dev_priv) && port == PORT_A) {
3577                 signal_levels = gen7_edp_signal_levels(train_set);
3578                 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3579         } else if (IS_GEN6(dev_priv) && port == PORT_A) {
3580                 signal_levels = gen6_edp_signal_levels(train_set);
3581                 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3582         } else {
3583                 signal_levels = gen4_signal_levels(train_set);
3584                 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3585         }
3586
3587         if (mask)
3588                 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3589
3590         DRM_DEBUG_KMS("Using vswing level %d\n",
3591                 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3592         DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3593                 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3594                         DP_TRAIN_PRE_EMPHASIS_SHIFT);
3595
3596         intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3597
3598         I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3599         POSTING_READ(intel_dp->output_reg);
3600 }
3601
/*
 * Update intel_dp->DP with the requested link-training pattern and write
 * it out to the port register (with a posting read to flush).
 */
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
				       uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	/* dev_priv is used implicitly by the I915_WRITE/POSTING_READ macros. */
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);

	_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3615
3616 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3617 {
3618         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
3619         struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3620         enum port port = intel_dig_port->base.port;
3621         uint32_t val;
3622
3623         if (!HAS_DDI(dev_priv))
3624                 return;
3625
3626         val = I915_READ(DP_TP_CTL(port));
3627         val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3628         val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3629         I915_WRITE(DP_TP_CTL(port), val);
3630
3631         /*
3632          * On PORT_A we can have only eDP in SST mode. There the only reason
3633          * we need to set idle transmission mode is to work around a HW issue
3634          * where we enable the pipe while not in idle link-training mode.
3635          * In this case there is requirement to wait for a minimum number of
3636          * idle patterns to be sent.
3637          */
3638         if (port == PORT_A)
3639                 return;
3640
3641         if (intel_wait_for_register(dev_priv,DP_TP_STATUS(port),
3642                                     DP_TP_STATUS_IDLE_DONE,
3643                                     DP_TP_STATUS_IDLE_DONE,
3644                                     1))
3645                 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3646 }
3647
/*
 * Take a non-DDI DP port down: switch it to the idle training pattern,
 * then disable the port, applying the IBX transcoder-A workaround where
 * required. The exact register write/posting-read ordering below is a
 * hardware sequence — do not reorder.
 */
static void
intel_dp_link_down(struct intel_encoder *encoder,
		   const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	enum port port = encoder->port;
	uint32_t DP = intel_dp->DP;

	/* DDI platforms have their own disable path. */
	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	/* Nothing to do if the port is already off. */
	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Step 1: drop to the idle link-training pattern. */
	if ((IS_GEN7(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev_priv))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/* Step 2: disable the port (and audio output). */
	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
		/*
		 * We get CPU/PCH FIFO underruns on the other pipe when
		 * doing the workaround. Sweep them under the rug.
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);

		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
	}

	/* Respect the panel's power-down delay before anything else touches it. */
	msleep(intel_dp->panel_power_down_delay);

	intel_dp->DP = DP;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* PPS state must be updated under the pps lock. */
		pps_lock(intel_dp);
		intel_dp->active_pipe = INVALID_PIPE;
		pps_unlock(intel_dp);
	}
}
3722
3723 bool
3724 intel_dp_read_dpcd(struct intel_dp *intel_dp)
3725 {
3726         if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
3727                              sizeof(intel_dp->dpcd)) < 0)
3728                 return false; /* aux transfer failed */
3729
3730         DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3731
3732         return intel_dp->dpcd[DP_DPCD_REV] != 0;
3733 }
3734
/*
 * One-time DPCD initialization for eDP panels: read the base DPCD,
 * branch descriptor, PSR caps, eDP display-control registers and the
 * eDP 1.4+ supported link rates, then compute the common source/sink
 * rate set. Returns false if the base DPCD could not be read.
 */
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	/* DPCD 1.1+ advertises whether AUX handshake can be skipped. */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
		dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
			DP_NO_AUX_HANDSHAKE_LINK_TRAINING;

	intel_psr_init_dpcd(intel_dp);

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
			     sizeof(intel_dp->edp_dpcd))
		DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
			      intel_dp->edp_dpcd);

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				sink_rates, sizeof(sink_rates));

		/* The table is zero-terminated; stop at the first empty slot. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	return true;
}
3809
3810
/*
 * (Re-)read the sink's DPCD, sink count and downstream port info, as
 * done on every detect/hotplug. Returns false on AUX failure or when a
 * non-eDP branch device reports no attached sink.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	u8 sink_count;

	if (!intel_dp_read_dpcd(intel_dp))
		return false;

	/* Don't clobber cached eDP rates. */
	if (!intel_dp_is_edp(intel_dp)) {
		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &sink_count) <= 0)
		return false;

	/*
	 * Sink count can change between short pulse hpd hence
	 * a member variable in intel_dp will track any changes
	 * between short pulse interrupts.
	 */
	intel_dp->sink_count = DP_GET_SINK_COUNT(sink_count);

	/*
	 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
	 * a dongle is present but no display. Unless we require to know
	 * if a dongle is present or not, we don't need to update
	 * downstream port information. So, an early return here saves
	 * time from performing other operations which are not required.
	 */
	if (!intel_dp_is_edp(intel_dp) && !intel_dp->sink_count)
		return false;

	if (!drm_dp_is_branch(intel_dp->dpcd))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
			     intel_dp->downstream_ports,
			     DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
3858
3859 static bool
3860 intel_dp_can_mst(struct intel_dp *intel_dp)
3861 {
3862         u8 mstm_cap;
3863
3864         if (!i915_modparams.enable_dp_mst)
3865                 return false;
3866
3867         if (!intel_dp->can_mst)
3868                 return false;
3869
3870         if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3871                 return false;
3872
3873         if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
3874                 return false;
3875
3876         return mstm_cap & DP_MST_CAP;
3877 }
3878
3879 static void
3880 intel_dp_configure_mst(struct intel_dp *intel_dp)
3881 {
3882         if (!i915_modparams.enable_dp_mst)
3883                 return;
3884
3885         if (!intel_dp->can_mst)
3886                 return;
3887
3888         intel_dp->is_mst = intel_dp_can_mst(intel_dp);
3889
3890         if (intel_dp->is_mst)
3891                 DRM_DEBUG_KMS("Sink is MST capable\n");
3892         else
3893                 DRM_DEBUG_KMS("Sink is not MST capable\n");
3894
3895         drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
3896                                         intel_dp->is_mst);
3897 }
3898
/*
 * Stop sink CRC calculation in the sink's DP_TEST_SINK register and
 * poll (one vblank per attempt, up to 10) until the sink's CRC counter
 * drains to zero. When @disable_wa is set, re-enable IPS on the way
 * out — presumably undoing a workaround applied by the matching start
 * path; confirm against intel_dp_sink_crc_start().
 *
 * Returns 0 on success, -EIO on AUX failure, -ETIMEDOUT if the counter
 * never reached zero.
 */
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp,
				  struct intel_crtc_state *crtc_state, bool disable_wa)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	u8 buf;
	int ret = 0;
	int count = 0;
	int attempts = 10;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Clear the start bit, preserving the other TEST_SINK bits. */
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Wait a vblank between polls of the sink's CRC count. */
	do {
		intel_wait_for_vblank(dev_priv, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto out;
		}
		count = buf & DP_TEST_COUNT_MASK;
	} while (--attempts && count);

	if (attempts == 0) {
		DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
		ret = -ETIMEDOUT;
	}

 out:
	if (disable_wa)
		hsw_enable_ips(crtc_state);
	return ret;
}
3944
/*
 * Start sink CRC calculation: check that the sink supports the CRC test,
 * stop any run already in flight, disable IPS for the duration of the
 * test and set DP_TEST_SINK_START.
 *
 * Returns 0 on success, -ENOTTY if the sink lacks CRC support, -EIO on a
 * DPCD access failure, or whatever intel_dp_sink_crc_stop() returned.
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp,
				   struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	u8 buf;
	int ret;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	/* The sink must advertise CRC support before we attempt the test. */
	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	/*
	 * A previous run may still be active; stop it first.  IPS hasn't
	 * been disabled yet, so pass disable_wa = false.
	 */
	if (buf & DP_TEST_SINK_START) {
		ret = intel_dp_sink_crc_stop(intel_dp, crtc_state, false);
		if (ret)
			return ret;
	}

	/*
	 * IPS is kept off while the CRC test runs;
	 * intel_dp_sink_crc_stop() re-enables it when disable_wa is set.
	 */
	hsw_disable_ips(crtc_state);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		hsw_enable_ips(crtc_state);
		return -EIO;
	}

	/* Let one full frame go by before CRCs are sampled. */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return 0;
}
3980
/*
 * intel_dp_sink_crc - read a 6-byte frame CRC from the sink
 * @intel_dp: DP encoder to test
 * @crtc_state: state of the pipe feeding this sink
 * @crc: output buffer for the 6 CRC bytes starting at DP_TEST_CRC_R_CR
 *
 * Starts sink CRC calculation, waits up to 6 vblanks for the sink's
 * TEST_COUNT to become non-zero, reads the CRC bytes and stops the
 * calculation again (which also re-enables IPS).
 *
 * Returns 0 on success or a negative errno.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;

	ret = intel_dp_sink_crc_start(intel_dp, crtc_state);
	if (ret)
		return ret;

	/* Wait, one vblank at a time, for the sink to produce a CRC. */
	do {
		intel_wait_for_vblank(dev_priv, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

	} while (--attempts && count == 0);

	if (attempts == 0) {
		DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
		ret = -ETIMEDOUT;
		goto stop;
	}

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
		ret = -EIO;
		goto stop;
	}

stop:
	/* Always stop the test; disable_wa = true restores IPS. */
	intel_dp_sink_crc_stop(intel_dp, crtc_state, true);
	return ret;
}
4021
4022 static bool
4023 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4024 {
4025         return drm_dp_dpcd_readb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
4026                                  sink_irq_vector) == 1;
4027 }
4028
4029 static bool
4030 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4031 {
4032         return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
4033                                 sink_irq_vector, DP_DPRX_ESI_LEN) ==
4034                 DP_DPRX_ESI_LEN;
4035 }
4036
/*
 * Handle a LINK_TRAINING automated-test request (DP CTS 1.2, 4.3.1.11):
 * read the requested lane count and link rate from the sink, validate
 * them against what this source/sink pair supports, and stash them in
 * the compliance state for the subsequent modeset.
 *
 * Returns DP_TEST_ACK on success, DP_TEST_NAK otherwise.
 */
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
	int status = 0;
	int test_link_rate;
	uint8_t test_lane_count, test_link_bw;
	/* (DP CTS 1.2)
	 * 4.3.1.11
	 */
	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
				   &test_lane_count);

	if (status <= 0) {
		DRM_DEBUG_KMS("Lane count read failed\n");
		return DP_TEST_NAK;
	}
	test_lane_count &= DP_MAX_LANE_COUNT_MASK;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
				   &test_link_bw);
	if (status <= 0) {
		DRM_DEBUG_KMS("Link Rate read failed\n");
		return DP_TEST_NAK;
	}
	/* Convert the BW code (e.g. 0x0a) into a link rate in kHz. */
	test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);

	/* Validate the requested link rate and lane count */
	if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
					test_lane_count))
		return DP_TEST_NAK;

	/* Remembered here; the actual retrain happens from the modeset path. */
	intel_dp->compliance.test_lane_count = test_lane_count;
	intel_dp->compliance.test_link_rate = test_link_rate;

	return DP_TEST_ACK;
}
4073
/*
 * Handle a TEST_PATTERN (video pattern) automated-test request: read the
 * requested pattern, resolution and pixel format from the DPCD test
 * registers, and accept only the combination this driver implements
 * (color ramp, RGB, VESA range, 6 or 8 bpc).  Accepted parameters are
 * stashed in the compliance state for userspace to act on.
 *
 * Returns DP_TEST_ACK if the request is supported, DP_TEST_NAK otherwise.
 */
static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	uint8_t test_pattern;
	uint8_t test_misc;
	__be16 h_width, v_height;	/* big-endian, as stored in the DPCD */
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		DRM_DEBUG_KMS("Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	/* Only the color-ramp pattern is supported. */
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		DRM_DEBUG_KMS("H Width read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		DRM_DEBUG_KMS("V Height read failed\n");
		return DP_TEST_NAK;
	}

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		DRM_DEBUG_KMS("TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	/* Only RGB with VESA (non-CEA) dynamic range is supported. */
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = 1;

	return DP_TEST_ACK;
}
4134
/*
 * Handle an EDID-read automated-test request: if the earlier EDID read
 * failed or was unreliable, request the failsafe resolution; otherwise
 * write the checksum of the last EDID block back to the sink as the CTS
 * requires and request the preferred resolution.
 *
 * Returns DP_TEST_ACK, optionally with DP_TEST_EDID_CHECKSUM_WRITE set.
 */
static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	uint8_t test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
				      intel_dp->aux.i2c_nack_count,
				      intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		block += intel_connector->detect_edid->extensions;

		/* Checksum write failure is logged but does not NAK the test. */
		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			DRM_DEBUG_KMS("Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = 1;

	return test_result;
}
4178
4179 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4180 {
4181         uint8_t test_result = DP_TEST_NAK;
4182         return test_result;
4183 }
4184
4185 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4186 {
4187         uint8_t response = DP_TEST_NAK;
4188         uint8_t request = 0;
4189         int status;
4190
4191         status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
4192         if (status <= 0) {
4193                 DRM_DEBUG_KMS("Could not read test request from sink\n");
4194                 goto update_status;
4195         }
4196
4197         switch (request) {
4198         case DP_TEST_LINK_TRAINING:
4199                 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4200                 response = intel_dp_autotest_link_training(intel_dp);
4201                 break;
4202         case DP_TEST_LINK_VIDEO_PATTERN:
4203                 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4204                 response = intel_dp_autotest_video_pattern(intel_dp);
4205                 break;
4206         case DP_TEST_LINK_EDID_READ:
4207                 DRM_DEBUG_KMS("EDID test requested\n");
4208                 response = intel_dp_autotest_edid(intel_dp);
4209                 break;
4210         case DP_TEST_LINK_PHY_TEST_PATTERN:
4211                 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4212                 response = intel_dp_autotest_phy_pattern(intel_dp);
4213                 break;
4214         default:
4215                 DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
4216                 break;
4217         }
4218
4219         if (response & DP_TEST_ACK)
4220                 intel_dp->compliance.test_type = request;
4221
4222 update_status:
4223         status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
4224         if (status <= 0)
4225                 DRM_DEBUG_KMS("Could not write test response to sink\n");
4226 }
4227
/*
 * Service an MST short-pulse interrupt: read the ESI block, retrain the
 * link if channel EQ dropped, let the MST topology manager process the
 * IRQ, ack the handled ESI bits back to the sink, and loop while new ESI
 * events keep arriving.  If the ESI read fails, the device is assumed
 * gone: MST is torn down and a hotplug event is sent.
 *
 * Returns the topology manager's result, 0 if nothing was handled, or
 * -EINVAL when not in MST mode / the device disappeared.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[DP_DPRX_ESI_LEN] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced ESI bits; retry the
				 * 3-byte write up to 3 times. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have arrived meanwhile;
				 * keep servicing until the ESI read fails
				 * or nothing is handled. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4284
/*
 * Retrain the DP link on an already-active pipe, with CPU (and PCH,
 * where applicable) FIFO underrun reporting suppressed for the duration
 * since retraining is expected to cause transient underruns.
 */
static void
intel_dp_retrain_link(struct intel_dp *intel_dp)
{
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	/* Suppress underruns caused by re-training */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
	if (crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), false);

	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	/* Keep underrun reporting disabled until things are stable */
	intel_wait_for_vblank(dev_priv, crtc->pipe);

	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
	if (crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), true);
}
4309
/*
 * Check the DPCD link status and retrain if channel EQ is no longer ok.
 * Caller must hold connection_mutex; if the connector has an active CRTC
 * its mutex must be held too.  Bails out quietly when there is no CRTC,
 * the CRTC is inactive, a commit is still in flight, or the cached link
 * parameters are no longer valid for this sink.
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_connector_state *conn_state =
		intel_dp->attached_connector->base.state;
	u8 link_status[DP_LINK_STATUS_SIZE];

	WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		DRM_ERROR("Failed to get link status\n");
		return;
	}

	if (!conn_state->crtc)
		return;

	WARN_ON(!drm_modeset_is_locked(&conn_state->crtc->mutex));

	if (!conn_state->crtc->state->active)
		return;

	/* Don't race a commit that is still being applied to the hardware. */
	if (conn_state->commit &&
	    !try_wait_for_completion(&conn_state->commit->hw_done))
		return;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return;

	/* Retrain if Channel EQ or CR not ok */
	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);

		intel_dp_retrain_link(intel_dp);
	}
}
4354
/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse -  handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if short pulse is handled and full detection
 * is NOT required and %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	u8 sink_irq_vector = 0;
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
	    sink_irq_vector != 0) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* Retrains the link if channel EQ dropped; see that function. */
	intel_dp_check_link_status(intel_dp);

	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
	}

	return true;
}
4420
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connector status from the DPCD: eDP and non-branch devices
 * are connected once the DPCD reads back; branch devices are judged by
 * SINK_COUNT (when HPD-aware), MST capability, or a DDC probe, in that
 * order.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	/* Resume the LSPCON bridge first so the DPCD read below works. */
	if (lspcon->active)
		lspcon_resume(lspcon);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	if (intel_dp_is_edp(intel_dp))
		return connector_status_connected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {

		return intel_dp->sink_count ?
		connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD 1.0: fall back to the coarse downstream-port field. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4475
4476 static enum drm_connector_status
4477 edp_detect(struct intel_dp *intel_dp)
4478 {
4479         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
4480         enum drm_connector_status status;
4481
4482         status = intel_panel_detect(dev_priv);
4483         if (status == connector_status_unknown)
4484                 status = connector_status_connected;
4485
4486         return status;
4487 }
4488
4489 static bool ibx_digital_port_connected(struct intel_encoder *encoder)
4490 {
4491         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4492         u32 bit;
4493
4494         switch (encoder->hpd_pin) {
4495         case HPD_PORT_B:
4496                 bit = SDE_PORTB_HOTPLUG;
4497                 break;
4498         case HPD_PORT_C:
4499                 bit = SDE_PORTC_HOTPLUG;
4500                 break;
4501         case HPD_PORT_D:
4502                 bit = SDE_PORTD_HOTPLUG;
4503                 break;
4504         default:
4505                 MISSING_CASE(encoder->hpd_pin);
4506                 return false;
4507         }
4508
4509         return I915_READ(SDEISR) & bit;
4510 }
4511
/* Live HPD state for CougarPoint PCH ports, from the SDEISR register. */
static bool cpt_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = SDE_PORTB_HOTPLUG_CPT;
		break;
	case HPD_PORT_C:
		bit = SDE_PORTC_HOTPLUG_CPT;
		break;
	case HPD_PORT_D:
		bit = SDE_PORTD_HOTPLUG_CPT;
		break;
	default:
		/* No live-state bit for this pin on CPT. */
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return I915_READ(SDEISR) & bit;
}
4534
/*
 * Live HPD state for SunrisePoint PCH: ports A and E have SPT-specific
 * bits; the remaining ports use the CPT layout.
 */
static bool spt_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_A:
		bit = SDE_PORTA_HOTPLUG_SPT;
		break;
	case HPD_PORT_E:
		bit = SDE_PORTE_HOTPLUG_SPT;
		break;
	default:
		/* Ports B/C/D keep the CPT bit layout. */
		return cpt_digital_port_connected(encoder);
	}

	return I915_READ(SDEISR) & bit;
}
4553
/* Live HPD state for G4x, from the PORT_HOTPLUG_STAT register. */
static bool g4x_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_B:
		bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case HPD_PORT_C:
		bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
		break;
	case HPD_PORT_D:
		bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
		break;
	default:
		/* No live-state bit for this pin on G4x. */
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
4576
4577 static bool gm45_digital_port_connected(struct intel_encoder *encoder)
4578 {
4579         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4580         u32 bit;
4581
4582         switch (encoder->hpd_pin) {
4583         case HPD_PORT_B:
4584                 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4585                 break;
4586         case HPD_PORT_C:
4587                 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4588                 break;
4589         case HPD_PORT_D:
4590                 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4591                 break;
4592         default:
4593                 MISSING_CASE(encoder->hpd_pin);
4594                 return false;
4595         }
4596
4597         return I915_READ(PORT_HOTPLUG_STAT) & bit;
4598 }
4599
4600 static bool ilk_digital_port_connected(struct intel_encoder *encoder)
4601 {
4602         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4603
4604         if (encoder->hpd_pin == HPD_PORT_A)
4605                 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
4606         else
4607                 return ibx_digital_port_connected(encoder);
4608 }
4609
4610 static bool snb_digital_port_connected(struct intel_encoder *encoder)
4611 {
4612         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4613
4614         if (encoder->hpd_pin == HPD_PORT_A)
4615                 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
4616         else
4617                 return cpt_digital_port_connected(encoder);
4618 }
4619
4620 static bool ivb_digital_port_connected(struct intel_encoder *encoder)
4621 {
4622         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4623
4624         if (encoder->hpd_pin == HPD_PORT_A)
4625                 return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
4626         else
4627                 return cpt_digital_port_connected(encoder);
4628 }
4629
4630 static bool bdw_digital_port_connected(struct intel_encoder *encoder)
4631 {
4632         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4633
4634         if (encoder->hpd_pin == HPD_PORT_A)
4635                 return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
4636         else
4637                 return cpt_digital_port_connected(encoder);
4638 }
4639
/* Live HPD state for Broxton DDI A-C, from the gen8 DE port ISR. */
static bool bxt_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 bit;

	switch (encoder->hpd_pin) {
	case HPD_PORT_A:
		bit = BXT_DE_PORT_HP_DDIA;
		break;
	case HPD_PORT_B:
		bit = BXT_DE_PORT_HP_DDIB;
		break;
	case HPD_PORT_C:
		bit = BXT_DE_PORT_HP_DDIC;
		break;
	default:
		/* No live-state bit for this pin on BXT. */
		MISSING_CASE(encoder->hpd_pin);
		return false;
	}

	return I915_READ(GEN8_DE_PORT_ISR) & bit;
}
4662
4663 /*
4664  * intel_digital_port_connected - is the specified port connected?
4665  * @encoder: intel_encoder
4666  *
4667  * Return %true if port is connected, %false otherwise.
4668  */
4669 bool intel_digital_port_connected(struct intel_encoder *encoder)
4670 {
4671         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4672
4673         if (HAS_GMCH_DISPLAY(dev_priv)) {
4674                 if (IS_GM45(dev_priv))
4675                         return gm45_digital_port_connected(encoder);
4676                 else
4677                         return g4x_digital_port_connected(encoder);
4678         }
4679
4680         if (IS_GEN5(dev_priv))
4681                 return ilk_digital_port_connected(encoder);
4682         else if (IS_GEN6(dev_priv))
4683                 return snb_digital_port_connected(encoder);
4684         else if (IS_GEN7(dev_priv))
4685                 return ivb_digital_port_connected(encoder);
4686         else if (IS_GEN8(dev_priv))
4687                 return bdw_digital_port_connected(encoder);
4688         else if (IS_GEN9_LP(dev_priv))
4689                 return bxt_digital_port_connected(encoder);
4690         else
4691                 return spt_digital_port_connected(encoder);
4692 }
4693
4694 static struct edid *
4695 intel_dp_get_edid(struct intel_dp *intel_dp)
4696 {
4697         struct intel_connector *intel_connector = intel_dp->attached_connector;
4698
4699         /* use cached edid if we have one */
4700         if (intel_connector->edid) {
4701                 /* invalid edid */
4702                 if (IS_ERR(intel_connector->edid))
4703                         return NULL;
4704
4705                 return drm_edid_duplicate(intel_connector->edid);
4706         } else
4707                 return drm_get_edid(&intel_connector->base,
4708                                     &intel_dp->aux.ddc);
4709 }
4710
/*
 * Fetch the sink's EDID, cache it as the connector's detect_edid, and
 * derive audio capability from it.
 */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct edid *edid;

	/* Drop any previously cached EDID before re-reading. */
	intel_dp_unset_edid(intel_dp);
	edid = intel_dp_get_edid(intel_dp);
	intel_connector->detect_edid = edid;

	/* edid may be NULL here; presumably drm_detect_monitor_audio()
	 * copes with that — TODO confirm. */
	intel_dp->has_audio = drm_detect_monitor_audio(edid);
}
4723
4724 static void
4725 intel_dp_unset_edid(struct intel_dp *intel_dp)
4726 {
4727         struct intel_connector *intel_connector = intel_dp->attached_connector;
4728
4729         kfree(intel_connector->detect_edid);
4730         intel_connector->detect_edid = NULL;
4731
4732         intel_dp->has_audio = false;
4733 }
4734
/*
 * Perform a full detection cycle for a DP/eDP connector (the "long HPD
 * pulse" path).  Grabs the AUX power domain for the duration, probes the
 * sink, re-reads link parameters if requested, handles MST transitions,
 * caches the EDID and services any pending sink IRQs.
 *
 * Returns a connector_status_* value.  Must be called with
 * connection_mutex held (asserted below).
 */
static int
intel_dp_long_pulse(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
	enum drm_connector_status status;
	u8 sink_irq_vector = 0;

	WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);

	/* Can't disconnect eDP, but you can close the lid... */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		/* Forget any in-progress compliance test state. */
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

		/* If the MST device vanished, tear down MST mode. */
		if (intel_dp->is_mst) {
			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
				      intel_dp->is_mst,
				      intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	/* Re-derive max link parameters after e.g. a GPU reset/resume. */
	if (intel_dp->reset_link_params) {
		/* Initial max link lane count */
		intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);

		/* Initial max link rate */
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);

		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	intel_dp_configure_mst(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	} else {
		/*
		 * If display is now connected check links status,
		 * there has been known issues of link loss triggerring
		 * long pulse.
		 *
		 * Some sinks (eg. ASUS PB287Q) seem to perform some
		 * weird HPD ping pong during modesets. So we can apparently
		 * end up with HPD going low during a modeset, and then
		 * going back up soon after. And once that happens we must
		 * retrain the link to get a picture. That's in case no
		 * userspace component reacted to intermittent HPD dip.
		 */
		intel_dp_check_link_status(intel_dp);
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	/* eDP counts as connected even without an EDID. */
	if (intel_dp_is_edp(intel_dp) || connector->detect_edid)
		status = connector_status_connected;
	intel_dp->detect_done = true;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
	    sink_irq_vector != 0) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	/* Drop the cached EDID unless connected (MST keeps its own state). */
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
	return status;
}
4846
4847 static int
4848 intel_dp_detect(struct drm_connector *connector,
4849                 struct drm_modeset_acquire_ctx *ctx,
4850                 bool force)
4851 {
4852         struct intel_dp *intel_dp = intel_attached_dp(connector);
4853         int status = connector->status;
4854
4855         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4856                       connector->base.id, connector->name);
4857
4858         /* If full detect is not performed yet, do a full detect */
4859         if (!intel_dp->detect_done) {
4860                 struct drm_crtc *crtc;
4861                 int ret;
4862
4863                 crtc = connector->state->crtc;
4864                 if (crtc) {
4865                         ret = drm_modeset_lock(&crtc->mutex, ctx);
4866                         if (ret)
4867                                 return ret;
4868                 }
4869
4870                 status = intel_dp_long_pulse(intel_dp->attached_connector);
4871         }
4872
4873         intel_dp->detect_done = false;
4874
4875         return status;
4876 }
4877
4878 static void
4879 intel_dp_force(struct drm_connector *connector)
4880 {
4881         struct intel_dp *intel_dp = intel_attached_dp(connector);
4882         struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4883         struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4884
4885         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4886                       connector->base.id, connector->name);
4887         intel_dp_unset_edid(intel_dp);
4888
4889         if (connector->status != connector_status_connected)
4890                 return;
4891
4892         intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
4893
4894         intel_dp_set_edid(intel_dp);
4895
4896         intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
4897 }
4898
4899 static int intel_dp_get_modes(struct drm_connector *connector)
4900 {
4901         struct intel_connector *intel_connector = to_intel_connector(connector);
4902         struct edid *edid;
4903
4904         edid = intel_connector->detect_edid;
4905         if (edid) {
4906                 int ret = intel_connector_update_modes(connector, edid);
4907                 if (ret)
4908                         return ret;
4909         }
4910
4911         /* if eDP has no EDID, fall back to fixed mode */
4912         if (intel_dp_is_edp(intel_attached_dp(connector)) &&
4913             intel_connector->panel.fixed_mode) {
4914                 struct drm_display_mode *mode;
4915
4916                 mode = drm_mode_duplicate(connector->dev,
4917                                           intel_connector->panel.fixed_mode);
4918                 if (mode) {
4919                         drm_mode_probed_add(connector, mode);
4920                         return 1;
4921                 }
4922         }
4923
4924         return 0;
4925 }
4926
4927 static int
4928 intel_dp_connector_register(struct drm_connector *connector)
4929 {
4930         struct intel_dp *intel_dp = intel_attached_dp(connector);
4931         int ret;
4932
4933         ret = intel_connector_register(connector);
4934         if (ret)
4935                 return ret;
4936
4937         i915_debugfs_connector_add(connector);
4938
4939         DRM_DEBUG_KMS("registering %s bus for %s\n",
4940                       intel_dp->aux.name, connector->kdev->kobj.name);
4941
4942         intel_dp->aux.dev = connector->kdev;
4943         return drm_dp_aux_register(&intel_dp->aux);
4944 }
4945
4946 static void
4947 intel_dp_connector_unregister(struct drm_connector *connector)
4948 {
4949         drm_dp_aux_unregister(&intel_attached_dp(connector)->aux);
4950         intel_connector_unregister(connector);
4951 }
4952
4953 static void
4954 intel_dp_connector_destroy(struct drm_connector *connector)
4955 {
4956         struct intel_connector *intel_connector = to_intel_connector(connector);
4957
4958         kfree(intel_connector->detect_edid);
4959
4960         if (!IS_ERR_OR_NULL(intel_connector->edid))
4961                 kfree(intel_connector->edid);
4962
4963         /*
4964          * Can't call intel_dp_is_edp() since the encoder may have been
4965          * destroyed already.
4966          */
4967         if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4968                 intel_panel_fini(&intel_connector->panel);
4969
4970         drm_connector_cleanup(connector);
4971         kfree(connector);
4972 }
4973
4974 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4975 {
4976         struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4977         struct intel_dp *intel_dp = &intel_dig_port->dp;
4978
4979         intel_dp_mst_encoder_cleanup(intel_dig_port);
4980         if (intel_dp_is_edp(intel_dp)) {
4981                 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4982                 /*
4983                  * vdd might still be enabled do to the delayed vdd off.
4984                  * Make sure vdd is actually turned off here.
4985                  */
4986                 pps_lock(intel_dp);
4987                 edp_panel_vdd_off_sync(intel_dp);
4988                 pps_unlock(intel_dp);
4989
4990                 if (intel_dp->edp_notifier.notifier_call) {
4991                         unregister_reboot_notifier(&intel_dp->edp_notifier);
4992                         intel_dp->edp_notifier.notifier_call = NULL;
4993                 }
4994         }
4995
4996         intel_dp_aux_fini(intel_dp);
4997
4998         drm_encoder_cleanup(encoder);
4999         kfree(intel_dig_port);
5000 }
5001
5002 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5003 {
5004         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5005
5006         if (!intel_dp_is_edp(intel_dp))
5007                 return;
5008
5009         /*
5010          * vdd might still be enabled do to the delayed vdd off.
5011          * Make sure vdd is actually turned off here.
5012          */
5013         cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5014         pps_lock(intel_dp);
5015         edp_panel_vdd_off_sync(intel_dp);
5016         pps_unlock(intel_dp);
5017 }
5018
5019 static
5020 int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
5021                                 u8 *an)
5022 {
5023         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
5024         uint8_t txbuf[4], rxbuf[2], reply = 0;
5025         ssize_t dpcd_ret;
5026         int ret;
5027
5028         /* Output An first, that's easy */
5029         dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
5030                                      an, DRM_HDCP_AN_LEN);
5031         if (dpcd_ret != DRM_HDCP_AN_LEN) {
5032                 DRM_ERROR("Failed to write An over DP/AUX (%zd)\n", dpcd_ret);
5033                 return dpcd_ret >= 0 ? -EIO : dpcd_ret;
5034         }
5035
5036         /*
5037          * Since Aksv is Oh-So-Secret, we can't access it in software. So in
5038          * order to get it on the wire, we need to create the AUX header as if
5039          * we were writing the data, and then tickle the hardware to output the
5040          * data once the header is sent out.
5041          */
5042         txbuf[0] = (DP_AUX_NATIVE_WRITE << 4) |
5043                    ((DP_AUX_HDCP_AKSV >> 16) & 0xf);
5044         txbuf[1] = (DP_AUX_HDCP_AKSV >> 8) & 0xff;
5045         txbuf[2] = DP_AUX_HDCP_AKSV & 0xff;
5046         txbuf[3] = DRM_HDCP_KSV_LEN - 1;
5047
5048         ret = intel_dp_aux_ch(intel_dp, txbuf, sizeof(txbuf), rxbuf,
5049                               sizeof(rxbuf), true);
5050         if (ret < 0) {
5051                 DRM_ERROR("Write Aksv over DP/AUX failed (%d)\n", ret);
5052                 return ret;
5053         } else if (ret == 0) {
5054                 DRM_ERROR("Aksv write over DP/AUX was empty\n");
5055                 return -EIO;
5056         }
5057
5058         reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
5059         return reply == DP_AUX_NATIVE_REPLY_ACK ? 0 : -EIO;
5060 }
5061
5062 static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
5063                                    u8 *bksv)
5064 {
5065         ssize_t ret;
5066         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
5067                                DRM_HDCP_KSV_LEN);
5068         if (ret != DRM_HDCP_KSV_LEN) {
5069                 DRM_ERROR("Read Bksv from DP/AUX failed (%zd)\n", ret);
5070                 return ret >= 0 ? -EIO : ret;
5071         }
5072         return 0;
5073 }
5074
5075 static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
5076                                       u8 *bstatus)
5077 {
5078         ssize_t ret;
5079         /*
5080          * For some reason the HDMI and DP HDCP specs call this register
5081          * definition by different names. In the HDMI spec, it's called BSTATUS,
5082          * but in DP it's called BINFO.
5083          */
5084         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
5085                                bstatus, DRM_HDCP_BSTATUS_LEN);
5086         if (ret != DRM_HDCP_BSTATUS_LEN) {
5087                 DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
5088                 return ret >= 0 ? -EIO : ret;
5089         }
5090         return 0;
5091 }
5092
5093 static
5094 int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
5095                              u8 *bcaps)
5096 {
5097         ssize_t ret;
5098
5099         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
5100                                bcaps, 1);
5101         if (ret != 1) {
5102                 DRM_ERROR("Read bcaps from DP/AUX failed (%zd)\n", ret);
5103                 return ret >= 0 ? -EIO : ret;
5104         }
5105
5106         return 0;
5107 }
5108
5109 static
5110 int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
5111                                    bool *repeater_present)
5112 {
5113         ssize_t ret;
5114         u8 bcaps;
5115
5116         ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5117         if (ret)
5118                 return ret;
5119
5120         *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
5121         return 0;
5122 }
5123
5124 static
5125 int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
5126                                 u8 *ri_prime)
5127 {
5128         ssize_t ret;
5129         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
5130                                ri_prime, DRM_HDCP_RI_LEN);
5131         if (ret != DRM_HDCP_RI_LEN) {
5132                 DRM_ERROR("Read Ri' from DP/AUX failed (%zd)\n", ret);
5133                 return ret >= 0 ? -EIO : ret;
5134         }
5135         return 0;
5136 }
5137
5138 static
5139 int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
5140                                  bool *ksv_ready)
5141 {
5142         ssize_t ret;
5143         u8 bstatus;
5144         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5145                                &bstatus, 1);
5146         if (ret != 1) {
5147                 DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
5148                 return ret >= 0 ? -EIO : ret;
5149         }
5150         *ksv_ready = bstatus & DP_BSTATUS_READY;
5151         return 0;
5152 }
5153
5154 static
5155 int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
5156                                 int num_downstream, u8 *ksv_fifo)
5157 {
5158         ssize_t ret;
5159         int i;
5160
5161         /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
5162         for (i = 0; i < num_downstream; i += 3) {
5163                 size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
5164                 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5165                                        DP_AUX_HDCP_KSV_FIFO,
5166                                        ksv_fifo + i * DRM_HDCP_KSV_LEN,
5167                                        len);
5168                 if (ret != len) {
5169                         DRM_ERROR("Read ksv[%d] from DP/AUX failed (%zd)\n", i,
5170                                   ret);
5171                         return ret >= 0 ? -EIO : ret;
5172                 }
5173         }
5174         return 0;
5175 }
5176
5177 static
5178 int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
5179                                     int i, u32 *part)
5180 {
5181         ssize_t ret;
5182
5183         if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
5184                 return -EINVAL;
5185
5186         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5187                                DP_AUX_HDCP_V_PRIME(i), part,
5188                                DRM_HDCP_V_PRIME_PART_LEN);
5189         if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
5190                 DRM_ERROR("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
5191                 return ret >= 0 ? -EIO : ret;
5192         }
5193         return 0;
5194 }
5195
/* intel_hdcp_shim .toggle_signalling hook: a no-op for SST DisplayPort. */
static
int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
				    bool enable)
{
	/* Not used for single stream DisplayPort setups */
	return 0;
}
5203
5204 static
5205 bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
5206 {
5207         ssize_t ret;
5208         u8 bstatus;
5209
5210         ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5211                                &bstatus, 1);
5212         if (ret != 1) {
5213                 DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
5214                 return false;
5215         }
5216
5217         return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
5218 }
5219
5220 static
5221 int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
5222                           bool *hdcp_capable)
5223 {
5224         ssize_t ret;
5225         u8 bcaps;
5226
5227         ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5228         if (ret)
5229                 return ret;
5230
5231         *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
5232         return 0;
5233 }
5234
/* DP AUX-channel backed implementations of the HDCP shim operations. */
static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
	.write_an_aksv = intel_dp_hdcp_write_an_aksv,
	.read_bksv = intel_dp_hdcp_read_bksv,
	.read_bstatus = intel_dp_hdcp_read_bstatus,
	.repeater_present = intel_dp_hdcp_repeater_present,
	.read_ri_prime = intel_dp_hdcp_read_ri_prime,
	.read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
	.read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
	.read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
	.toggle_signalling = intel_dp_hdcp_toggle_signalling,
	.check_link = intel_dp_hdcp_check_link,
	.hdcp_capable = intel_dp_hdcp_capable,
};
5248
/*
 * If the BIOS left panel VDD enabled, take a matching display power
 * reference and schedule the deferred VDD off so refcounting stays
 * balanced.  Caller must hold pps_mutex (asserted below).
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Nothing to sanitize if VDD is already off. */
	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
5269
5270 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
5271 {
5272         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
5273
5274         if ((intel_dp->DP & DP_PORT_EN) == 0)
5275                 return INVALID_PIPE;
5276
5277         if (IS_CHERRYVIEW(dev_priv))
5278                 return DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5279         else
5280                 return PORT_TO_PIPE(intel_dp->DP);
5281 }
5282
/*
 * drm_encoder_funcs .reset hook: re-sync DP software state with the
 * hardware (e.g. across resume).
 */
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

	/* Non-DDI platforms keep the port state in the output register. */
	if (!HAS_DDI(dev_priv))
		intel_dp->DP = I915_READ(intel_dp->output_reg);

	if (lspcon->active)
		lspcon_resume(lspcon);

	/* Force link parameters to be re-derived on the next detect. */
	intel_dp->reset_link_params = true;

	pps_lock(intel_dp);

	/* VLV/CHV track which pipe currently drives the port. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->active_pipe = vlv_active_pipe(intel_dp);

	if (intel_dp_is_edp(intel_dp)) {
		/* Reinit the power sequencer, in case BIOS did something with it. */
		intel_dp_pps_init(intel_dp);
		intel_edp_panel_vdd_sanitize(intel_dp);
	}

	pps_unlock(intel_dp);
}
5310
/*
 * Connector funcs.  Note there is no .detect here; probing goes through
 * the probe helper's .detect_ctx hook instead.
 */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
5322
/* Probe helper hooks: context-aware detect, mode listing and validation. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_digital_connector_atomic_check,
};
5329
/* Encoder funcs: software state reset and teardown. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
5334
/*
 * Handle a hotplug interrupt pulse on a DP digital port.
 *
 * Long pulses are only flagged here (full detection is deferred by
 * returning IRQ_NONE); short pulses are serviced inline under the
 * modeset locks.  Returns IRQ_HANDLED when the event was fully consumed.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum irqreturn ret = IRQ_NONE;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->base.port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->base.port),
		      long_hpd ? "long" : "short");

	if (long_hpd) {
		/* Flag state for a full detect; IRQ_NONE defers the work. */
		intel_dp->reset_link_params = true;
		intel_dp->detect_done = false;
		return IRQ_NONE;
	}

	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);

	if (intel_dp->is_mst) {
		if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
			/*
			 * If we were in MST mode, and device is not
			 * there, get out of MST mode
			 */
			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
				      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
			intel_dp->detect_done = false;
			goto put_power;
		}
	}

	if (!intel_dp->is_mst) {
		struct drm_modeset_acquire_ctx ctx;
		struct drm_connector *connector = &intel_dp->attached_connector->base;
		struct drm_crtc *crtc;
		int iret;
		bool handled = false;

		drm_modeset_acquire_init(&ctx, 0);
retry:
		iret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, &ctx);
		if (iret)
			goto err;

		crtc = connector->state->crtc;
		if (crtc) {
			iret = drm_modeset_lock(&crtc->mutex, &ctx);
			if (iret)
				goto err;
		}

		handled = intel_dp_short_pulse(intel_dp);

err:
		/* -EDEADLK: drop and retry per the acquire-ctx protocol. */
		if (iret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			goto retry;
		}

		drm_modeset_drop_locks(&ctx);
		drm_modeset_acquire_fini(&ctx);
		WARN(iret, "Acquiring modeset locks failed with %i\n", iret);

		/* Short pulse can signify loss of hdcp authentication */
		intel_hdcp_check_link(intel_dp->attached_connector);

		if (!handled) {
			/* Force a full detect on the next ->detect() call. */
			intel_dp->detect_done = false;
			goto put_power;
		}
	}

	ret = IRQ_HANDLED;

put_power:
	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);

	return ret;
}
5430
5431 /* check the VBT to see whether the eDP is on another port */
5432 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
5433 {
5434         /*
5435          * eDP not supported on g4x. so bail out early just
5436          * for a bit extra safety in case the VBT is bonkers.
5437          */
5438         if (INTEL_GEN(dev_priv) < 5)
5439                 return false;
5440
5441         if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
5442                 return true;
5443
5444         return intel_bios_is_port_edp(dev_priv, port);
5445 }
5446
5447 static void
5448 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5449 {
5450         struct drm_i915_private *dev_priv = to_i915(connector->dev);
5451         enum port port = dp_to_dig_port(intel_dp)->base.port;
5452
5453         if (!IS_G4X(dev_priv) && port != PORT_A)
5454                 intel_attach_force_audio_property(connector);
5455
5456         intel_attach_broadcast_rgb_property(connector);
5457
5458         if (intel_dp_is_edp(intel_dp)) {
5459                 u32 allowed_scalers;
5460
5461                 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
5462                 if (!HAS_GMCH_DISPLAY(dev_priv))
5463                         allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
5464
5465                 drm_connector_attach_scaling_mode_property(connector, allowed_scalers);
5466
5467                 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;
5468
5469         }
5470 }
5471
5472 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5473 {
5474         intel_dp->panel_power_off_time = ktime_get_boottime();
5475         intel_dp->last_power_on = jiffies;
5476         intel_dp->last_backlight_off = jiffies;
5477 }
5478
/*
 * Read the panel power sequencing delays currently programmed into the
 * PPS registers and decode them into @seq.
 */
static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(regs.pp_on);
	pp_off = I915_READ(regs.pp_off);
	/*
	 * BXT/CNP/ICP have no separate divisor register; their power cycle
	 * delay is encoded in the control register instead (see below).
	 */
	if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
	    !HAS_PCH_ICP(dev_priv)) {
		I915_WRITE(regs.pp_ctrl, pp_ctl);
		pp_div = I915_READ(regs.pp_div);
	}

	/* Pull timing values out of registers */
	seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		     PANEL_POWER_UP_DELAY_SHIFT;

	seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		  PANEL_LIGHT_ON_DELAY_SHIFT;

	seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		  PANEL_LIGHT_OFF_DELAY_SHIFT;

	seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		   PANEL_POWER_DOWN_DELAY_SHIFT;

	if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
	    HAS_PCH_ICP(dev_priv)) {
		seq->t11_t12 = ((pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
				BXT_POWER_CYCLE_DELAY_SHIFT) * 1000;
	} else {
		seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}
}
5522
/* Log one set of PPS delays (100us units), tagged with @state_name. */
static void
intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
{
	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      state_name,
		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}
5530
5531 static void
5532 intel_pps_verify_state(struct intel_dp *intel_dp)
5533 {
5534         struct edp_power_seq hw;
5535         struct edp_power_seq *sw = &intel_dp->pps_delays;
5536
5537         intel_pps_readout_hw_state(intel_dp, &hw);
5538
5539         if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
5540             hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
5541                 DRM_ERROR("PPS state mismatch\n");
5542                 intel_pps_dump_state("sw", sw);
5543                 intel_pps_dump_state("hw", &hw);
5544         }
5545 }
5546
/*
 * Compute the panel power sequencer delays for this panel and cache
 * them in intel_dp->pps_delays. Three sources are combined field by
 * field: the values currently in the hw registers ("cur", i.e. BIOS
 * programming), the VBT eDP power sequencing block ("vbt"), and, as
 * a fallback when both are zero, the eDP 1.3 spec upper limits
 * ("spec"). All values are kept in the hw's 100us units. Must be
 * called with pps_mutex held; runs only once per intel_dp.
 */
static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? (t11_t12 can never legitimately end up
	 * zero: the code below always adds the 100ms zero-base offset) */
	if (final->t11_t12 != 0)
		return;

	intel_pps_readout_hw_state(intel_dp, &cur);

	intel_pps_dump_state("cur", &cur);

	vbt = dev_priv->vbt.edp.pps;
	/* On Toshiba Satellite P50-C-18C system the VBT T12 delay
	 * of 500ms appears to be too short. Occasionally the panel
	 * just fails to power back on. Increasing the delay to 800ms
	 * seems sufficient to avoid this problem.
	 *
	 * NOTE(review): the minimum enforced below is 1300 * 10, i.e.
	 * 1300ms in 100us units, not the 800ms mentioned above —
	 * presumably raised later; confirm against git history.
	 */
	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
		DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
			      vbt.t11_t12);
	}
	/* T11_T12 delay is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	vbt.t11_t12 += 100 * 10;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state("vbt", &vbt);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from the 100us register units to ms for the cached
	 * delay fields used by the wait helpers. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;

	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}
5638
/*
 * Program the panel power sequencer registers from the delays cached
 * in intel_dp->pps_delays. With @force_disable_vdd, any BIOS-left
 * EDP_FORCE_VDD is cleared first (needed when taking over a power
 * sequencer on VLV/CHV). Must be called with pps_mutex held.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = dev_priv->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->base.port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_get_registers(intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * edp_panel_vdd_on() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ironlake_get_pp_control(intel_dp);

		WARN(pp & PANEL_POWER_ON, "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			DRM_DEBUG_KMS("VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		I915_WRITE(regs.pp_ctrl, pp);
	}

	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. On BXT/GLK and CNP/ICP the power cycle delay lives in
	 * PP_CONTROL instead of a separate divisor register; the t11_t12
	 * value (100us units) is scaled back to the hw's 100ms units. */
	if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
	    HAS_PCH_ICP(dev_priv)) {
		pp_div = I915_READ(regs.pp_ctrl);
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
	}

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(regs.pp_on, pp_on);
	I915_WRITE(regs.pp_off, pp_off);
	if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
	    HAS_PCH_ICP(dev_priv))
		I915_WRITE(regs.pp_ctrl, pp_div);
	else
		I915_WRITE(regs.pp_div, pp_div);

	/* Read the values back for the debug log so we report what the
	 * hardware actually latched. */
	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(regs.pp_on),
		      I915_READ(regs.pp_off),
		      (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)  ||
		       HAS_PCH_ICP(dev_priv)) ?
		      (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(regs.pp_div));
}
5726
5727 static void intel_dp_pps_init(struct intel_dp *intel_dp)
5728 {
5729         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
5730
5731         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
5732                 vlv_initial_power_sequencer_setup(intel_dp);
5733         } else {
5734                 intel_dp_init_panel_power_sequencer(intel_dp);
5735                 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
5736         }
5737 }
5738
5739 /**
5740  * intel_dp_set_drrs_state - program registers for RR switch to take effect
5741  * @dev_priv: i915 device
5742  * @crtc_state: a pointer to the active intel_crtc_state
5743  * @refresh_rate: RR to be programmed
5744  *
5745  * This function gets called when refresh rate (RR) has to be changed from
5746  * one frequency to another. Switches can be between high and low RR
5747  * supported by the panel or to any other RR based on media playback (in
5748  * this case, RR value needs to be passed from user space).
5749  *
5750  * The caller of this function needs to take a lock on dev_priv->drrs.
5751  */
5752 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
5753                                     const struct intel_crtc_state *crtc_state,
5754                                     int refresh_rate)
5755 {
5756         struct intel_encoder *encoder;
5757         struct intel_digital_port *dig_port = NULL;
5758         struct intel_dp *intel_dp = dev_priv->drrs.dp;
5759         struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
5760         enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5761
5762         if (refresh_rate <= 0) {
5763                 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5764                 return;
5765         }
5766
5767         if (intel_dp == NULL) {
5768                 DRM_DEBUG_KMS("DRRS not supported.\n");
5769                 return;
5770         }
5771
5772         dig_port = dp_to_dig_port(intel_dp);
5773         encoder = &dig_port->base;
5774
5775         if (!intel_crtc) {
5776                 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5777                 return;
5778         }
5779
5780         if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5781                 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5782                 return;
5783         }
5784
5785         if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5786                         refresh_rate)
5787                 index = DRRS_LOW_RR;
5788
5789         if (index == dev_priv->drrs.refresh_rate_type) {
5790                 DRM_DEBUG_KMS(
5791                         "DRRS requested for previously set RR...ignoring\n");
5792                 return;
5793         }
5794
5795         if (!crtc_state->base.active) {
5796                 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5797                 return;
5798         }
5799
5800         if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
5801                 switch (index) {
5802                 case DRRS_HIGH_RR:
5803                         intel_dp_set_m_n(intel_crtc, M1_N1);
5804                         break;
5805                 case DRRS_LOW_RR:
5806                         intel_dp_set_m_n(intel_crtc, M2_N2);
5807                         break;
5808                 case DRRS_MAX_RR:
5809                 default:
5810                         DRM_ERROR("Unsupported refreshrate type\n");
5811                 }
5812         } else if (INTEL_GEN(dev_priv) > 6) {
5813                 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
5814                 u32 val;
5815
5816                 val = I915_READ(reg);
5817                 if (index > DRRS_HIGH_RR) {
5818                         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5819                                 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5820                         else
5821                                 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5822                 } else {
5823                         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5824                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5825                         else
5826                                 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5827                 }
5828                 I915_WRITE(reg, val);
5829         }
5830
5831         dev_priv->drrs.refresh_rate_type = index;
5832
5833         DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5834 }
5835
5836 /**
5837  * intel_edp_drrs_enable - init drrs struct if supported
5838  * @intel_dp: DP struct
5839  * @crtc_state: A pointer to the active crtc state.
5840  *
5841  * Initializes frontbuffer_bits and drrs.dp
5842  */
5843 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
5844                            const struct intel_crtc_state *crtc_state)
5845 {
5846         struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
5847
5848         if (!crtc_state->has_drrs) {
5849                 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5850                 return;
5851         }
5852
5853         if (dev_priv->psr.enabled) {
5854                 DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
5855                 return;
5856         }
5857
5858         mutex_lock(&dev_priv->drrs.mutex);
5859         if (WARN_ON(dev_priv->drrs.dp)) {
5860                 DRM_ERROR("DRRS already enabled\n");
5861                 goto unlock;
5862         }
5863
5864         dev_priv->drrs.busy_frontbuffer_bits = 0;
5865
5866         dev_priv->drrs.dp = intel_dp;
5867
5868 unlock:
5869         mutex_unlock(&dev_priv->drrs.mutex);
5870 }
5871
5872 /**
5873  * intel_edp_drrs_disable - Disable DRRS
5874  * @intel_dp: DP struct
5875  * @old_crtc_state: Pointer to old crtc_state.
5876  *
5877  */
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));

	if (!old_crtc_state->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	/* If currently downclocked, switch back to the fixed mode's
	 * (high) refresh rate before tearing DRRS down. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, old_crtc_state,
			intel_dp->attached_connector->panel.fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	/* Cancel outside the mutex: the downclock work itself takes
	 * drrs.mutex, so a _sync cancel under it could deadlock. */
	cancel_delayed_work_sync(&dev_priv->drrs.work);
}
5901
/*
 * Delayed work scheduled by intel_edp_drrs_flush(): if the screen has
 * stayed idle (no busy frontbuffer bits) for the timeout, switch the
 * panel to the downclocked (low) refresh rate.
 */
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	/* DRRS may have been disabled since this work was queued. */
	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;

		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			intel_dp->attached_connector->panel.downclock_mode->vrefresh);
	}

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
5933
5934 /**
5935  * intel_edp_drrs_invalidate - Disable Idleness DRRS
5936  * @dev_priv: i915 device
5937  * @frontbuffer_bits: frontbuffer plane tracking bits
5938  *
 * This function gets called every time rendering on the given planes starts.
5940  * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5941  *
5942  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5943  */
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
			       unsigned int frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* New activity: any pending downclock is no longer wanted. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only track frontbuffer bits belonging to the DRRS pipe. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}
5974
5975 /**
5976  * intel_edp_drrs_flush - Restart Idleness DRRS
5977  * @dev_priv: i915 device
5978  * @frontbuffer_bits: frontbuffer plane tracking bits
5979  *
5980  * This function gets called every time rendering on the given planes has
5981  * completed or flip on a crtc is completed. So DRRS should be upclocked
5982  * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5983  * if no other planes are dirty.
5984  *
5985  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5986  */
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Restart the idleness timer: drop any pending downclock first. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only frontbuffer bits belonging to the DRRS pipe are relevant. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* A flush indicates fresh activity on screen, hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
				dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
6024
6025 /**
6026  * DOC: Display Refresh Rate Switching (DRRS)
6027  *
6028  * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
6030  * dynamically, based on the usage scenario. This feature is applicable
6031  * for internal panels.
6032  *
6033  * Indication that the panel supports DRRS is given by the panel EDID, which
6034  * would list multiple refresh rates for one resolution.
6035  *
6036  * DRRS is of 2 types - static and seamless.
6037  * Static DRRS involves changing refresh rate (RR) by doing a full modeset
6038  * (may appear as a blink on screen) and is used in dock-undock scenario.
6039  * Seamless DRRS involves changing RR without any visual effect to the user
6040  * and can be used during normal system usage. This is done by programming
6041  * certain registers.
6042  *
6043  * Support for static/seamless DRRS may be indicated in the VBT based on
6044  * inputs from the panel spec.
6045  *
6046  * DRRS saves power by switching to low RR based on usage scenarios.
6047  *
6048  * The implementation is based on frontbuffer tracking implementation.  When
6049  * there is a disturbance on the screen triggered by user activity or a periodic
6050  * system activity, DRRS is disabled (RR is changed to high RR).  When there is
6051  * no movement on screen, after a timeout of 1 second, a switch to low RR is
6052  * made.
6053  *
6054  * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
6055  * and intel_edp_drrs_flush() are called.
6056  *
6057  * DRRS can be further extended to support other internal panels and also
6058  * the scenario of video playback wherein RR is set based on the rate
6059  * requested by userspace.
6060  */
6061
6062 /**
6063  * intel_dp_drrs_init - Init basic DRRS work and mutex.
6064  * @connector: eDP connector
6065  * @fixed_mode: preferred mode of panel
6066  *
 * This function is called only once at driver load to initialize basic
6068  * DRRS stuff.
6069  *
6070  * Returns:
6071  * Downclock mode if panel supports it, else return NULL.
6072  * DRRS support is determined by the presence of downclock mode (apart
6073  * from VBT setting).
6074  */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *connector,
		   struct drm_display_mode *fixed_mode)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct drm_display_mode *downclock_mode = NULL;

	/* Work/mutex are set up unconditionally so the flush/invalidate
	 * hooks are always safe to call, even when DRRS ends up
	 * unsupported below. */
	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_GEN(dev_priv) <= 6) {
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	/* DRRS needs a second, lower-refresh panel mode to switch to. */
	downclock_mode = intel_find_panel_downclock(dev_priv, fixed_mode,
						    &connector->base);

	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
6109
/*
 * eDP-specific part of connector init: set up the panel power
 * sequencer, read DPCD and EDID, pick the panel fixed mode (EDID
 * preferred mode, falling back to the VBT mode) and initialize the
 * panel/backlight. Returns false if the eDP device looks absent
 * ("ghost") or would clash with an already-registered LVDS encoder;
 * returns true for non-eDP ports without doing anything.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *alt_fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!intel_dp_is_edp(intel_dp))
		return true;

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(&dev_priv->drm)) {
		WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		DRM_INFO("LVDS was detected, not registering eDP\n");

		return false;
	}

	pps_lock(intel_dp);

	intel_dp_init_panel_power_timestamps(intel_dp);
	intel_dp_pps_init(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
		} else {
			/* EDID was readable but yielded no usable modes. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available, save an alt mode also */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
		} else if (!alt_fixed_mode) {
			alt_fixed_mode = drm_mode_duplicate(dev, scan);
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode) {
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
			connector->display_info.width_mm = fixed_mode->width_mm;
			connector->display_info.height_mm = fixed_mode->height_mm;
		}
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, alt_fixed_mode,
			 downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;

out_vdd_off:
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);

	return false;
}
6235
6236 static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
6237 {
6238         struct intel_connector *intel_connector;
6239         struct drm_connector *connector;
6240
6241         intel_connector = container_of(work, typeof(*intel_connector),
6242                                        modeset_retry_work);
6243         connector = &intel_connector->base;
6244         DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
6245                       connector->name);
6246
6247         /* Grab the locks before changing connector property*/
6248         mutex_lock(&connector->dev->mode_config.mutex);
6249         /* Set connector link status to BAD and send a Uevent to notify
6250          * userspace to do a modeset.
6251          */
6252         drm_mode_connector_set_link_status_property(connector,
6253                                                     DRM_MODE_LINK_STATUS_BAD);
6254         mutex_unlock(&connector->dev->mode_config.mutex);
6255         /* Send Hotplug uevent so userspace can reprobe */
6256         drm_kms_helper_hotplug_event(connector->dev);
6257 }
6258
/*
 * intel_dp_init_connector - set up the DRM connector for a DP/eDP port
 * @intel_dig_port: digital port the DP encoder lives on
 * @intel_connector: pre-allocated connector to initialize
 *
 * Initializes the connector, AUX channel, panel power sequencing work,
 * MST support and optional HDCP for the given port.  Returns true on
 * success; false if the port configuration is invalid or eDP panel
 * probing fails (in which case the connector is cleaned up again).
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	if (WARN(intel_dig_port->max_lanes < 1,
		 "Not enough lanes (%d) for DP on port %c\n",
		 intel_dig_port->max_lanes, port_name(port)))
		return false;

	intel_dp_set_source_rates(intel_dp);

	intel_dp->reset_link_params = true;
	intel_dp->pps_pipe = INVALID_PIPE;
	intel_dp->active_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (HAS_DDI(dev_priv))
		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_port_edp(dev_priv, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->active_pipe = vlv_active_pipe(intel_dp);

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
		    intel_dp_is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	/* Interlace is allowed everywhere except on vlv/chv. */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);

	intel_dp_aux_init(intel_dp);

	/* Delayed work to turn off panel VDD after the hold is released. */
	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
	    (port == PORT_B || port == PORT_C ||
	     port == PORT_D || port == PORT_F))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	/* eDP panel probing; undo AUX/MST setup if the panel isn't there. */
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(intel_dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* HDCP is only wired up for external DP, never for eDP panels. */
	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
		if (ret)
			DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
	}

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd.  Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}
6378
/*
 * intel_dp_init - create and register a DP encoder + connector pair
 * @dev_priv: i915 device
 * @output_reg: the port's DP control register
 * @port: port identifier
 *
 * Allocates the digital port and connector, registers the DRM encoder,
 * wires up the platform-specific enable/disable hooks, and initializes
 * the connector via intel_dp_init_connector().  Returns true on success;
 * on any failure all partially-created objects are unwound and false is
 * returned.
 */
bool intel_dp_init(struct drm_i915_private *dev_priv,
		   i915_reg_t output_reg,
		   enum port port)
{
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return false;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
			     &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
			     "DP %c", port_name(port)))
		goto err_encoder_init;

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	/* Platform-specific modeset hooks: chv, vlv, ilk+ and g4x differ. */
	if (IS_CHERRYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = ilk_disable_dp;
		intel_encoder->post_disable = ilk_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = g4x_disable_dp;
	}

	intel_dig_port->dp.output_reg = output_reg;
	intel_dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DP;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
	/* On chv, port D is only usable on pipe C; others on pipes A/B. */
	if (IS_CHERRYVIEW(dev_priv)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;
	intel_encoder->port = port;

	/* Register for long/short HPD pulse handling on this port. */
	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hotplug.irq_port[port] = intel_dig_port;

	if (port != PORT_A)
		intel_infoframe_init(intel_dig_port);

	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return true;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);
	return false;
}
6467
6468 void intel_dp_mst_suspend(struct drm_device *dev)
6469 {
6470         struct drm_i915_private *dev_priv = to_i915(dev);
6471         int i;
6472
6473         /* disable MST */
6474         for (i = 0; i < I915_MAX_PORTS; i++) {
6475                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6476
6477                 if (!intel_dig_port || !intel_dig_port->dp.can_mst)
6478                         continue;
6479
6480                 if (intel_dig_port->dp.is_mst)
6481                         drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6482         }
6483 }
6484
6485 void intel_dp_mst_resume(struct drm_device *dev)
6486 {
6487         struct drm_i915_private *dev_priv = to_i915(dev);
6488         int i;
6489
6490         for (i = 0; i < I915_MAX_PORTS; i++) {
6491                 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6492                 int ret;
6493
6494                 if (!intel_dig_port || !intel_dig_port->dp.can_mst)
6495                         continue;
6496
6497                 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6498                 if (ret)
6499                         intel_dp_check_mst_status(&intel_dig_port->dp);
6500         }
6501 }