drm/i915/tc: Don't connect the PHY in intel_tc_port_connected()
[linux-2.6-microblaze.git] drivers/gpu/drm/i915/display/intel_tc.c
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5
6 #include "i915_drv.h"
7 #include "i915_reg.h"
8 #include "intel_ddi.h"
9 #include "intel_de.h"
10 #include "intel_display.h"
11 #include "intel_display_power_map.h"
12 #include "intel_display_types.h"
13 #include "intel_dkl_phy_regs.h"
14 #include "intel_dp_mst.h"
15 #include "intel_mg_phy_regs.h"
16 #include "intel_tc.h"
17
18 enum tc_port_mode {
19         TC_PORT_DISCONNECTED,
20         TC_PORT_TBT_ALT,
21         TC_PORT_DP_ALT,
22         TC_PORT_LEGACY,
23 };
24
25 struct intel_tc_port;
26
27 struct intel_tc_phy_ops {
28         enum intel_display_power_domain (*cold_off_domain)(struct intel_tc_port *tc);
29         u32 (*hpd_live_status)(struct intel_tc_port *tc);
30         bool (*is_ready)(struct intel_tc_port *tc);
31         bool (*is_owned)(struct intel_tc_port *tc);
32         void (*get_hw_state)(struct intel_tc_port *tc);
33         bool (*connect)(struct intel_tc_port *tc, int required_lanes);
34         void (*disconnect)(struct intel_tc_port *tc);
35         void (*init)(struct intel_tc_port *tc);
36 };
37
38 struct intel_tc_port {
39         struct intel_digital_port *dig_port;
40
41         const struct intel_tc_phy_ops *phy_ops;
42
43         struct mutex lock;      /* protects the TypeC port mode */
44         intel_wakeref_t lock_wakeref;
45 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
46         enum intel_display_power_domain lock_power_domain;
47 #endif
48         struct delayed_work disconnect_phy_work;
49         int link_refcount;
50         bool legacy_port:1;
51         char port_name[8];
52         enum tc_port_mode mode;
53         enum tc_port_mode init_mode;
54         enum phy_fia phy_fia;
55         u8 phy_fia_idx;
56 };
57
58 static enum intel_display_power_domain
59 tc_phy_cold_off_domain(struct intel_tc_port *tc);
60 static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc);
61 static bool tc_phy_is_ready(struct intel_tc_port *tc);
62 static bool tc_phy_take_ownership(struct intel_tc_port *tc, bool take);
63 static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc);
64
65 static const char *tc_port_mode_name(enum tc_port_mode mode)
66 {
67         static const char * const names[] = {
68                 [TC_PORT_DISCONNECTED] = "disconnected",
69                 [TC_PORT_TBT_ALT] = "tbt-alt",
70                 [TC_PORT_DP_ALT] = "dp-alt",
71                 [TC_PORT_LEGACY] = "legacy",
72         };
73
74         if (WARN_ON(mode >= ARRAY_SIZE(names)))
75                 mode = TC_PORT_DISCONNECTED;
76
77         return names[mode];
78 }
79
80 static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port)
81 {
82         return dig_port->tc;
83 }
84
85 static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc)
86 {
87         return to_i915(tc->dig_port->base.base.dev);
88 }
89
90 static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
91                                   enum tc_port_mode mode)
92 {
93         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
94         enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
95         struct intel_tc_port *tc = to_tc_port(dig_port);
96
97         return intel_phy_is_tc(i915, phy) && tc->mode == mode;
98 }
99
100 bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
101 {
102         return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT);
103 }
104
105 bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port)
106 {
107         return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT);
108 }
109
110 bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
111 {
112         return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
113 }
114
115 /*
116  * The display power domains used for TC ports, depending on the
117  * platform and TC mode (legacy, DP-alt, TBT):
118  *
119  * POWER_DOMAIN_DISPLAY_CORE:
120  * --------------------------
121  * ADLP/all modes:
122  *   - TCSS/IOM access for PHY ready state.
123  * ADLP+/all modes:
124  *   - DE/north-,south-HPD ISR access for HPD live state.
125  *
126  * POWER_DOMAIN_PORT_DDI_LANES_<port>:
127  * -----------------------------------
128  * ICL+/all modes:
129  *   - DE/DDI_BUF access for port enabled state.
130  * ADLP/all modes:
131  *   - DE/DDI_BUF access for PHY owned state.
132  *
133  * POWER_DOMAIN_AUX_USBC<TC port index>:
134  * -------------------------------------
135  * ICL/legacy mode:
136  *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
137  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
138  *     main lanes.
139  * ADLP/legacy, DP-alt modes:
140  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
141  *     main lanes.
142  *
143  * POWER_DOMAIN_TC_COLD_OFF:
144  * -------------------------
145  * TGL/legacy, DP-alt modes:
146  *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
147  *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
148  *     main lanes.
149  *
150  * ICL, TGL, ADLP/TBT mode:
151  *   - TCSS/IOM,FIA access for HPD live state
152  *   - TCSS/TBT: block TC-cold power state for using the (TBT DP-IN)
153  *     AUX and main lanes.
154  */
155 bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
156 {
157         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
158         struct intel_tc_port *tc = to_tc_port(dig_port);
159
160         return tc_phy_cold_off_domain(tc) ==
161                intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
162 }
163
164 static intel_wakeref_t
165 __tc_cold_block(struct intel_tc_port *tc, enum intel_display_power_domain *domain)
166 {
167         struct drm_i915_private *i915 = tc_to_i915(tc);
168
169         *domain = tc_phy_cold_off_domain(tc);
170
171         return intel_display_power_get(i915, *domain);
172 }
173
174 static intel_wakeref_t
175 tc_cold_block(struct intel_tc_port *tc)
176 {
177         enum intel_display_power_domain domain;
178         intel_wakeref_t wakeref;
179
180         wakeref = __tc_cold_block(tc, &domain);
181 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
182         tc->lock_power_domain = domain;
183 #endif
184         return wakeref;
185 }
186
187 static void
188 __tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain domain,
189                   intel_wakeref_t wakeref)
190 {
191         struct drm_i915_private *i915 = tc_to_i915(tc);
192
193         intel_display_power_put(i915, domain, wakeref);
194 }
195
196 static void
197 tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref)
198 {
199         enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc);
200
201 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
202         drm_WARN_ON(&tc_to_i915(tc)->drm, tc->lock_power_domain != domain);
203 #endif
204         __tc_cold_unblock(tc, domain, wakeref);
205 }
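/*
 * A sketch of the intended pairing, based on the connect/disconnect handlers
 * further below (illustrative only): the wakeref returned by tc_cold_block()
 * is kept for as long as TC-cold must stay blocked, e.g.
 *
 *	tc->lock_wakeref = tc_cold_block(tc);
 *	... access the PHY/FIA registers ...
 *	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
 */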
206
207 static void
208 assert_display_core_power_enabled(struct intel_tc_port *tc)
209 {
210         struct drm_i915_private *i915 = tc_to_i915(tc);
211
212         drm_WARN_ON(&i915->drm,
213                     !intel_display_power_is_enabled(i915, POWER_DOMAIN_DISPLAY_CORE));
214 }
215
216 static void
217 assert_tc_cold_blocked(struct intel_tc_port *tc)
218 {
219         struct drm_i915_private *i915 = tc_to_i915(tc);
220         bool enabled;
221
222         enabled = intel_display_power_is_enabled(i915,
223                                                  tc_phy_cold_off_domain(tc));
224         drm_WARN_ON(&i915->drm, !enabled);
225 }
226
227 static enum intel_display_power_domain
228 tc_port_power_domain(struct intel_tc_port *tc)
229 {
230         struct drm_i915_private *i915 = tc_to_i915(tc);
231         enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);
232
233         return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
234 }
235
236 static void
237 assert_tc_port_power_enabled(struct intel_tc_port *tc)
238 {
239         struct drm_i915_private *i915 = tc_to_i915(tc);
240
241         drm_WARN_ON(&i915->drm,
242                     !intel_display_power_is_enabled(i915, tc_port_power_domain(tc)));
243 }
244
245 u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
246 {
247         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
248         struct intel_tc_port *tc = to_tc_port(dig_port);
249         u32 lane_mask;
250
251         lane_mask = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
252
253         drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
254         assert_tc_cold_blocked(tc);
255
256         lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx);
257         return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
258 }
259
260 u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
261 {
262         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
263         struct intel_tc_port *tc = to_tc_port(dig_port);
264         u32 pin_mask;
265
266         pin_mask = intel_de_read(i915, PORT_TX_DFLEXPA1(tc->phy_fia));
267
268         drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
269         assert_tc_cold_blocked(tc);
270
271         return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >>
272                DP_PIN_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
273 }
274
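/*
 * In DP-alt mode only the lanes the FIA reports as assigned to DP are usable
 * (1, 2 or all 4, depending on the pin assignment negotiated for the
 * connector), while legacy and TBT-alt ports always have all 4 lanes
 * available, hence the early return below.
 */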
275 int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
276 {
277         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
278         struct intel_tc_port *tc = to_tc_port(dig_port);
279         enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
280         intel_wakeref_t wakeref;
281         u32 lane_mask;
282
283         if (!intel_phy_is_tc(i915, phy) || tc->mode != TC_PORT_DP_ALT)
284                 return 4;
285
286         assert_tc_cold_blocked(tc);
287
288         lane_mask = 0;
289         with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
290                 lane_mask = intel_tc_port_get_lane_mask(dig_port);
291
292         switch (lane_mask) {
293         default:
294                 MISSING_CASE(lane_mask);
295                 fallthrough;
296         case 0x1:
297         case 0x2:
298         case 0x4:
299         case 0x8:
300                 return 1;
301         case 0x3:
302         case 0xc:
303                 return 2;
304         case 0xf:
305                 return 4;
306         }
307 }
308
309 void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
310                                       int required_lanes)
311 {
312         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
313         struct intel_tc_port *tc = to_tc_port(dig_port);
314         bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
315         u32 val;
316
317         drm_WARN_ON(&i915->drm,
318                     lane_reversal && tc->mode != TC_PORT_LEGACY);
319
320         assert_tc_cold_blocked(tc);
321
322         val = intel_de_read(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
323         val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx);
324
325         switch (required_lanes) {
326         case 1:
327                 val |= lane_reversal ?
328                         DFLEXDPMLE1_DPMLETC_ML3(tc->phy_fia_idx) :
329                         DFLEXDPMLE1_DPMLETC_ML0(tc->phy_fia_idx);
330                 break;
331         case 2:
332                 val |= lane_reversal ?
333                         DFLEXDPMLE1_DPMLETC_ML3_2(tc->phy_fia_idx) :
334                         DFLEXDPMLE1_DPMLETC_ML1_0(tc->phy_fia_idx);
335                 break;
336         case 4:
337                 val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc->phy_fia_idx);
338                 break;
339         default:
340                 MISSING_CASE(required_lanes);
341         }
342
343         intel_de_write(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
344 }
345
346 static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
347                                       u32 live_status_mask)
348 {
349         struct drm_i915_private *i915 = tc_to_i915(tc);
350         u32 valid_hpd_mask;
351
352         drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);
353
354         if (hweight32(live_status_mask) != 1)
355                 return;
356
357         if (tc->legacy_port)
358                 valid_hpd_mask = BIT(TC_PORT_LEGACY);
359         else
360                 valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
361                                  BIT(TC_PORT_TBT_ALT);
362
363         if (!(live_status_mask & ~valid_hpd_mask))
364                 return;
365
366         /* If live status mismatches the VBT flag, trust the live status. */
367         drm_dbg_kms(&i915->drm,
368                     "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
369                     tc->port_name, live_status_mask, valid_hpd_mask);
370
371         tc->legacy_port = !tc->legacy_port;
372 }
373
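/*
 * For example, with a modular FIA ports TC#1/TC#2 map to FIA1 (instance
 * index 0/1) and TC#3/TC#4 to FIA2 (again index 0/1), while without a
 * modular FIA all ports share FIA1 and the index is the 0-based TC port
 * number.
 */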
374 static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia)
375 {
376         struct drm_i915_private *i915 = tc_to_i915(tc);
377         enum port port = tc->dig_port->base.port;
378         enum tc_port tc_port = intel_port_to_tc(i915, port);
379
380         /*
381          * Each Modular FIA instance houses 2 TC ports. In SoCs that have more
382          * than two TC ports, there are multiple instances of Modular FIA.
383          */
384         if (modular_fia) {
385                 tc->phy_fia = tc_port / 2;
386                 tc->phy_fia_idx = tc_port % 2;
387         } else {
388                 tc->phy_fia = FIA1;
389                 tc->phy_fia_idx = tc_port;
390         }
391 }
392
393 /*
394  * ICL TC PHY handlers
395  * -------------------
396  */
397 static enum intel_display_power_domain
398 icl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
399 {
400         struct drm_i915_private *i915 = tc_to_i915(tc);
401         struct intel_digital_port *dig_port = tc->dig_port;
402
403         if (tc->legacy_port)
404                 return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
405
406         return POWER_DOMAIN_TC_COLD_OFF;
407 }
408
409 static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
410 {
411         struct drm_i915_private *i915 = tc_to_i915(tc);
412         struct intel_digital_port *dig_port = tc->dig_port;
413         u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
414         intel_wakeref_t wakeref;
415         u32 fia_isr;
416         u32 pch_isr;
417         u32 mask = 0;
418
419         with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref) {
420                 fia_isr = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
421                 pch_isr = intel_de_read(i915, SDEISR);
422         }
423
424         if (fia_isr == 0xffffffff) {
425                 drm_dbg_kms(&i915->drm,
426                             "Port %s: PHY in TCCOLD, nothing connected\n",
427                             tc->port_name);
428                 return mask;
429         }
430
431         if (fia_isr & TC_LIVE_STATE_TBT(tc->phy_fia_idx))
432                 mask |= BIT(TC_PORT_TBT_ALT);
433         if (fia_isr & TC_LIVE_STATE_TC(tc->phy_fia_idx))
434                 mask |= BIT(TC_PORT_DP_ALT);
435
436         if (pch_isr & isr_bit)
437                 mask |= BIT(TC_PORT_LEGACY);
438
439         return mask;
440 }
441
442 /*
443  * Return the PHY status complete flag indicating that display can acquire the
444  * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink
445  * is connected and it's ready to switch the ownership to display. The flag
446  * will be left cleared when a TBT-alt sink is connected, where the PHY is
447  * owned by the TBT subsystem and so switching the ownership to display is not
448  * required.
449  */
450 static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
451 {
452         struct drm_i915_private *i915 = tc_to_i915(tc);
453         u32 val;
454
455         assert_tc_cold_blocked(tc);
456
457         val = intel_de_read(i915, PORT_TX_DFLEXDPPMS(tc->phy_fia));
458         if (val == 0xffffffff) {
459                 drm_dbg_kms(&i915->drm,
460                             "Port %s: PHY in TCCOLD, assuming not ready\n",
461                             tc->port_name);
462                 return false;
463         }
464
465         return val & DP_PHY_MODE_STATUS_COMPLETED(tc->phy_fia_idx);
466 }
467
468 static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
469                                       bool take)
470 {
471         struct drm_i915_private *i915 = tc_to_i915(tc);
472         u32 val;
473
474         assert_tc_cold_blocked(tc);
475
476         val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
477         if (val == 0xffffffff) {
478                 drm_dbg_kms(&i915->drm,
479                             "Port %s: PHY in TCCOLD, can't %s ownership\n",
480                             tc->port_name, take ? "take" : "release");
481
482                 return false;
483         }
484
485         val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
486         if (take)
487                 val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
488
489         intel_de_write(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);
490
491         return true;
492 }
493
494 static bool icl_tc_phy_is_owned(struct intel_tc_port *tc)
495 {
496         struct drm_i915_private *i915 = tc_to_i915(tc);
497         u32 val;
498
499         assert_tc_cold_blocked(tc);
500
501         val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
502         if (val == 0xffffffff) {
503                 drm_dbg_kms(&i915->drm,
504                             "Port %s: PHY in TCCOLD, assume not owned\n",
505                             tc->port_name);
506                 return false;
507         }
508
509         return val & DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
510 }
511
512 static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
513 {
514         enum intel_display_power_domain domain;
515         intel_wakeref_t tc_cold_wref;
516
517         tc_cold_wref = __tc_cold_block(tc, &domain);
518
519         tc->mode = tc_phy_get_current_mode(tc);
520         if (tc->mode != TC_PORT_DISCONNECTED)
521                 tc->lock_wakeref = tc_cold_block(tc);
522
523         __tc_cold_unblock(tc, domain, tc_cold_wref);
524 }
525
526 /*
527  * This function implements the first part of the Connect Flow described by our
528  * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
529  * lanes, EDID, etc) is done as needed in the typical places.
530  *
531  * Unlike the other ports, type-C ports are not available to use as soon as we
532  * get a hotplug. The type-C PHYs can be shared between multiple controllers:
533  * display, USB, etc. As a result, handshaking through FIA is required around
534  * connect and disconnect to cleanly transfer ownership with the controller and
535  * set the type-C power state.
536  */
537 static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
538                                                 int required_lanes)
539 {
540         struct drm_i915_private *i915 = tc_to_i915(tc);
541         struct intel_digital_port *dig_port = tc->dig_port;
542         int max_lanes;
543
544         max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
545         if (tc->mode == TC_PORT_LEGACY) {
546                 drm_WARN_ON(&i915->drm, max_lanes != 4);
547                 return true;
548         }
549
550         drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DP_ALT);
551
552         /*
553          * Now we have to re-check the live state, in case the port recently
554          * became disconnected. Not necessary for legacy mode.
555          */
556         if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) {
557                 drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
558                             tc->port_name);
559                 return false;
560         }
561
562         if (max_lanes < required_lanes) {
563                 drm_dbg_kms(&i915->drm,
564                             "Port %s: PHY max lanes %d < required lanes %d\n",
565                             tc->port_name,
566                             max_lanes, required_lanes);
567                 return false;
568         }
569
570         return true;
571 }
572
573 static bool icl_tc_phy_connect(struct intel_tc_port *tc,
574                                int required_lanes)
575 {
576         struct drm_i915_private *i915 = tc_to_i915(tc);
577
578         tc->lock_wakeref = tc_cold_block(tc);
579
580         if (tc->mode == TC_PORT_TBT_ALT)
581                 return true;
582
583         if ((!tc_phy_is_ready(tc) ||
584              !tc_phy_take_ownership(tc, true)) &&
585             !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
586                 drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership (ready %s)\n",
587                             tc->port_name,
588                             str_yes_no(tc_phy_is_ready(tc)));
589                 goto out_unblock_tc_cold;
590         }
591
592
593         if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
594                 goto out_release_phy;
595
596         return true;
597
598 out_release_phy:
599         tc_phy_take_ownership(tc, false);
600 out_unblock_tc_cold:
601         tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
602
603         return false;
604 }
605
606 /*
607  * See the comment at the connect function. This implements the Disconnect
608  * Flow.
609  */
610 static void icl_tc_phy_disconnect(struct intel_tc_port *tc)
611 {
612         switch (tc->mode) {
613         case TC_PORT_LEGACY:
614         case TC_PORT_DP_ALT:
615                 tc_phy_take_ownership(tc, false);
616                 fallthrough;
617         case TC_PORT_TBT_ALT:
618                 tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
619                 break;
620         default:
621                 MISSING_CASE(tc->mode);
622         }
623 }
624
625 static void icl_tc_phy_init(struct intel_tc_port *tc)
626 {
627         tc_phy_load_fia_params(tc, false);
628 }
629
630 static const struct intel_tc_phy_ops icl_tc_phy_ops = {
631         .cold_off_domain = icl_tc_phy_cold_off_domain,
632         .hpd_live_status = icl_tc_phy_hpd_live_status,
633         .is_ready = icl_tc_phy_is_ready,
634         .is_owned = icl_tc_phy_is_owned,
635         .get_hw_state = icl_tc_phy_get_hw_state,
636         .connect = icl_tc_phy_connect,
637         .disconnect = icl_tc_phy_disconnect,
638         .init = icl_tc_phy_init,
639 };
640
641 /*
642  * TGL TC PHY handlers
643  * -------------------
644  */
645 static enum intel_display_power_domain
646 tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
647 {
648         return POWER_DOMAIN_TC_COLD_OFF;
649 }
650
651 static void tgl_tc_phy_init(struct intel_tc_port *tc)
652 {
653         struct drm_i915_private *i915 = tc_to_i915(tc);
654         intel_wakeref_t wakeref;
655         u32 val;
656
657         with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref)
658                 val = intel_de_read(i915, PORT_TX_DFLEXDPSP(FIA1));
659
660         drm_WARN_ON(&i915->drm, val == 0xffffffff);
661
662         tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK);
663 }
664
665 static const struct intel_tc_phy_ops tgl_tc_phy_ops = {
666         .cold_off_domain = tgl_tc_phy_cold_off_domain,
667         .hpd_live_status = icl_tc_phy_hpd_live_status,
668         .is_ready = icl_tc_phy_is_ready,
669         .is_owned = icl_tc_phy_is_owned,
670         .get_hw_state = icl_tc_phy_get_hw_state,
671         .connect = icl_tc_phy_connect,
672         .disconnect = icl_tc_phy_disconnect,
673         .init = tgl_tc_phy_init,
674 };
675
676 /*
677  * ADLP TC PHY handlers
678  * --------------------
679  */
680 static enum intel_display_power_domain
681 adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc)
682 {
683         struct drm_i915_private *i915 = tc_to_i915(tc);
684         struct intel_digital_port *dig_port = tc->dig_port;
685
686         if (tc->mode != TC_PORT_TBT_ALT)
687                 return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
688
689         return POWER_DOMAIN_TC_COLD_OFF;
690 }
691
692 static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
693 {
694         struct drm_i915_private *i915 = tc_to_i915(tc);
695         struct intel_digital_port *dig_port = tc->dig_port;
696         enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
697         u32 cpu_isr_bits = i915->display.hotplug.hpd[hpd_pin];
698         u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
699         intel_wakeref_t wakeref;
700         u32 cpu_isr;
701         u32 pch_isr;
702         u32 mask = 0;
703
704         with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
705                 cpu_isr = intel_de_read(i915, GEN11_DE_HPD_ISR);
706                 pch_isr = intel_de_read(i915, SDEISR);
707         }
708
709         if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK))
710                 mask |= BIT(TC_PORT_DP_ALT);
711         if (cpu_isr & (cpu_isr_bits & GEN11_DE_TBT_HOTPLUG_MASK))
712                 mask |= BIT(TC_PORT_TBT_ALT);
713
714         if (pch_isr & pch_isr_bit)
715                 mask |= BIT(TC_PORT_LEGACY);
716
717         return mask;
718 }
719
720 /*
721  * Return the PHY status complete flag indicating that display can acquire the
722  * PHY ownership. The IOM firmware sets this flag when it's ready to switch
723  * the ownership to display, regardless of what sink is connected (TBT-alt,
724  * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT
725  * subsystem and so switching the ownership to display is not required.
726  */
727 static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
728 {
729         struct drm_i915_private *i915 = tc_to_i915(tc);
730         enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);
731         u32 val;
732
733         assert_display_core_power_enabled(tc);
734
735         val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
736         if (val == 0xffffffff) {
737                 drm_dbg_kms(&i915->drm,
738                             "Port %s: PHY in TCCOLD, assuming not ready\n",
739                             tc->port_name);
740                 return false;
741         }
742
743         return val & TCSS_DDI_STATUS_READY;
744 }
745
746 static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
747                                        bool take)
748 {
749         struct drm_i915_private *i915 = tc_to_i915(tc);
750         enum port port = tc->dig_port->base.port;
751
752         assert_tc_port_power_enabled(tc);
753
754         intel_de_rmw(i915, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
755                      take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);
756
757         return true;
758 }
759
760 static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc)
761 {
762         struct drm_i915_private *i915 = tc_to_i915(tc);
763         enum port port = tc->dig_port->base.port;
764         u32 val;
765
766         assert_tc_port_power_enabled(tc);
767
768         val = intel_de_read(i915, DDI_BUF_CTL(port));
769         return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
770 }
771
772 static void adlp_tc_phy_init(struct intel_tc_port *tc)
773 {
774         tc_phy_load_fia_params(tc, true);
775 }
776
777 static const struct intel_tc_phy_ops adlp_tc_phy_ops = {
778         .cold_off_domain = adlp_tc_phy_cold_off_domain,
779         .hpd_live_status = adlp_tc_phy_hpd_live_status,
780         .is_ready = adlp_tc_phy_is_ready,
781         .is_owned = adlp_tc_phy_is_owned,
782         .get_hw_state = icl_tc_phy_get_hw_state,
783         .connect = icl_tc_phy_connect,
784         .disconnect = icl_tc_phy_disconnect,
785         .init = adlp_tc_phy_init,
786 };
787
788 /*
789  * Generic TC PHY handlers
790  * -----------------------
791  */
792 static enum intel_display_power_domain
793 tc_phy_cold_off_domain(struct intel_tc_port *tc)
794 {
795         return tc->phy_ops->cold_off_domain(tc);
796 }
797
798 static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc)
799 {
800         struct drm_i915_private *i915 = tc_to_i915(tc);
801         u32 mask;
802
803         mask = tc->phy_ops->hpd_live_status(tc);
804
805         /* The sink can be connected only in a single mode. */
806         drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1);
807
808         return mask;
809 }
810
811 static bool tc_phy_is_ready(struct intel_tc_port *tc)
812 {
813         return tc->phy_ops->is_ready(tc);
814 }
815
816 static bool tc_phy_is_owned(struct intel_tc_port *tc)
817 {
818         return tc->phy_ops->is_owned(tc);
819 }
820
821 static void tc_phy_get_hw_state(struct intel_tc_port *tc)
822 {
823         tc->phy_ops->get_hw_state(tc);
824 }
825
826 static bool tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
827 {
828         struct drm_i915_private *i915 = tc_to_i915(tc);
829
830         if (IS_ALDERLAKE_P(i915))
831                 return adlp_tc_phy_take_ownership(tc, take);
832
833         return icl_tc_phy_take_ownership(tc, take);
834 }
835
836 static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
837                                       bool phy_is_ready, bool phy_is_owned)
838 {
839         struct drm_i915_private *i915 = tc_to_i915(tc);
840
841         drm_WARN_ON(&i915->drm, phy_is_owned && !phy_is_ready);
842
843         return phy_is_ready && phy_is_owned;
844 }
845
846 static bool tc_phy_is_connected(struct intel_tc_port *tc,
847                                 enum icl_port_dpll_id port_pll_type)
848 {
849         struct intel_encoder *encoder = &tc->dig_port->base;
850         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
851         bool phy_is_ready = tc_phy_is_ready(tc);
852         bool phy_is_owned = tc_phy_is_owned(tc);
853         bool is_connected;
854
855         if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned))
856                 is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY;
857         else
858                 is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;
859
860         drm_dbg_kms(&i915->drm,
861                     "Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n",
862                     tc->port_name,
863                     str_yes_no(is_connected),
864                     str_yes_no(phy_is_ready),
865                     str_yes_no(phy_is_owned),
866                     port_pll_type == ICL_PORT_DPLL_DEFAULT ? "tbt" : "non-tbt");
867
868         return is_connected;
869 }
870
871 static void tc_phy_wait_for_ready(struct intel_tc_port *tc)
872 {
873         struct drm_i915_private *i915 = tc_to_i915(tc);
874
875         if (wait_for(tc_phy_is_ready(tc), 100))
876                 drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n",
877                         tc->port_name);
878 }
879
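/*
 * The live status mask uses BIT(TC_PORT_*) bits, so with at most one bit set
 * fls() - 1 recovers the mode directly: for instance BIT(TC_PORT_DP_ALT) ==
 * 0x4, and fls(0x4) - 1 == 2 == TC_PORT_DP_ALT.
 */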
880 static enum tc_port_mode
881 hpd_mask_to_tc_mode(u32 live_status_mask)
882 {
883         if (live_status_mask)
884                 return fls(live_status_mask) - 1;
885
886         return TC_PORT_DISCONNECTED;
887 }
888
889 static enum tc_port_mode
890 tc_phy_hpd_live_mode(struct intel_tc_port *tc)
891 {
892         u32 live_status_mask = tc_phy_hpd_live_status(tc);
893
894         return hpd_mask_to_tc_mode(live_status_mask);
895 }
896
897 static enum tc_port_mode
898 get_tc_mode_in_phy_owned_state(struct intel_tc_port *tc,
899                                enum tc_port_mode live_mode)
900 {
901         switch (live_mode) {
902         case TC_PORT_LEGACY:
903         case TC_PORT_DP_ALT:
904                 return live_mode;
905         default:
906                 MISSING_CASE(live_mode);
907                 fallthrough;
908         case TC_PORT_TBT_ALT:
909         case TC_PORT_DISCONNECTED:
910                 if (tc->legacy_port)
911                         return TC_PORT_LEGACY;
912                 else
913                         return TC_PORT_DP_ALT;
914         }
915 }
916
917 static enum tc_port_mode
918 get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc,
919                                    enum tc_port_mode live_mode)
920 {
921         switch (live_mode) {
922         case TC_PORT_LEGACY:
923                 return TC_PORT_DISCONNECTED;
924         case TC_PORT_DP_ALT:
925         case TC_PORT_TBT_ALT:
926                 return TC_PORT_TBT_ALT;
927         default:
928                 MISSING_CASE(live_mode);
929                 fallthrough;
930         case TC_PORT_DISCONNECTED:
931                 if (tc->legacy_port)
932                         return TC_PORT_DISCONNECTED;
933                 else
934                         return TC_PORT_TBT_ALT;
935         }
936 }
937
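/*
 * Summary of the readout in tc_phy_get_current_mode() below, combining the
 * two helpers above ("owned" meaning the PHY is both ready and owned):
 *
 *	live HPD	owned			not owned
 *	--------	-----			---------
 *	legacy		LEGACY			DISCONNECTED
 *	dp-alt		DP_ALT			TBT_ALT
 *	tbt-alt		LEGACY/DP_ALT (*)	TBT_ALT
 *	none		LEGACY/DP_ALT (*)	DISCONNECTED/TBT_ALT (*)
 *
 * (*) depending on tc->legacy_port; the owned && tbt-alt live status
 * combination is unexpected and triggers a WARN below.
 */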
938 static enum tc_port_mode
939 tc_phy_get_current_mode(struct intel_tc_port *tc)
940 {
941         struct drm_i915_private *i915 = tc_to_i915(tc);
942         enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc);
943         bool phy_is_ready;
944         bool phy_is_owned;
945         enum tc_port_mode mode;
946
947         /*
948          * For legacy ports the IOM firmware initializes the PHY during boot-up
949          * and system resume whether or not a sink is connected. Wait here for
950          * the PHY to become ready.
951          */
952         if (tc->legacy_port)
953                 tc_phy_wait_for_ready(tc);
954
955         phy_is_ready = tc_phy_is_ready(tc);
956         phy_is_owned = tc_phy_is_owned(tc);
957
958         if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) {
959                 mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode);
960         } else {
961                 drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT);
962                 mode = get_tc_mode_in_phy_owned_state(tc, live_mode);
963         }
964
965         drm_dbg_kms(&i915->drm,
966                     "Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n",
967                     tc->port_name,
968                     tc_port_mode_name(mode),
969                     str_yes_no(phy_is_ready),
970                     str_yes_no(phy_is_owned),
971                     tc_port_mode_name(live_mode));
972
973         return mode;
974 }
975
976 static enum tc_port_mode default_tc_mode(struct intel_tc_port *tc)
977 {
978         if (tc->legacy_port)
979                 return TC_PORT_LEGACY;
980
981         return TC_PORT_TBT_ALT;
982 }
983
984 static enum tc_port_mode
985 hpd_mask_to_target_mode(struct intel_tc_port *tc, u32 live_status_mask)
986 {
987         enum tc_port_mode mode = hpd_mask_to_tc_mode(live_status_mask);
988
989         if (mode != TC_PORT_DISCONNECTED)
990                 return mode;
991
992         return default_tc_mode(tc);
993 }
994
995 static enum tc_port_mode
996 tc_phy_get_target_mode(struct intel_tc_port *tc)
997 {
998         u32 live_status_mask = tc_phy_hpd_live_status(tc);
999
1000         return hpd_mask_to_target_mode(tc, live_status_mask);
1001 }
1002
1003 static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
1004 {
1005         struct drm_i915_private *i915 = tc_to_i915(tc);
1006         u32 live_status_mask = tc_phy_hpd_live_status(tc);
1007         bool connected;
1008
1009         tc_port_fixup_legacy_flag(tc, live_status_mask);
1010
1011         tc->mode = hpd_mask_to_target_mode(tc, live_status_mask);
1012
1013         connected = tc->phy_ops->connect(tc, required_lanes);
1014         if (!connected && tc->mode != default_tc_mode(tc)) {
1015                 tc->mode = default_tc_mode(tc);
1016                 connected = tc->phy_ops->connect(tc, required_lanes);
1017         }
1018
1019         drm_WARN_ON(&i915->drm, !connected);
1020 }
1021
1022 static void tc_phy_disconnect(struct intel_tc_port *tc)
1023 {
1024         if (tc->mode != TC_PORT_DISCONNECTED) {
1025                 tc->phy_ops->disconnect(tc);
1026                 tc->mode = TC_PORT_DISCONNECTED;
1027         }
1028 }
1029
1030 static void tc_phy_init(struct intel_tc_port *tc)
1031 {
1032         mutex_lock(&tc->lock);
1033         tc->phy_ops->init(tc);
1034         mutex_unlock(&tc->lock);
1035 }
1036
1037 static void intel_tc_port_reset_mode(struct intel_tc_port *tc,
1038                                      int required_lanes, bool force_disconnect)
1039 {
1040         struct drm_i915_private *i915 = tc_to_i915(tc);
1041         struct intel_digital_port *dig_port = tc->dig_port;
1042         enum tc_port_mode old_tc_mode = tc->mode;
1043
1044         intel_display_power_flush_work(i915);
1045         if (!intel_tc_cold_requires_aux_pw(dig_port)) {
1046                 enum intel_display_power_domain aux_domain;
1047                 bool aux_powered;
1048
1049                 aux_domain = intel_aux_power_domain(dig_port);
1050                 aux_powered = intel_display_power_is_enabled(i915, aux_domain);
1051                 drm_WARN_ON(&i915->drm, aux_powered);
1052         }
1053
1054         tc_phy_disconnect(tc);
1055         if (!force_disconnect)
1056                 tc_phy_connect(tc, required_lanes);
1057
1058         drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
1059                     tc->port_name,
1060                     tc_port_mode_name(old_tc_mode),
1061                     tc_port_mode_name(tc->mode));
1062 }
1063
1064 static bool intel_tc_port_needs_reset(struct intel_tc_port *tc)
1065 {
1066         return tc_phy_get_target_mode(tc) != tc->mode;
1067 }
1068
1069 static void intel_tc_port_update_mode(struct intel_tc_port *tc,
1070                                       int required_lanes, bool force_disconnect)
1071 {
1072         if (force_disconnect ||
1073             intel_tc_port_needs_reset(tc))
1074                 intel_tc_port_reset_mode(tc, required_lanes, force_disconnect);
1075 }
1076
1077 static void __intel_tc_port_get_link(struct intel_tc_port *tc)
1078 {
1079         tc->link_refcount++;
1080 }
1081
1082 static void __intel_tc_port_put_link(struct intel_tc_port *tc)
1083 {
1084         tc->link_refcount--;
1085 }
1086
1087 static bool tc_port_is_enabled(struct intel_tc_port *tc)
1088 {
1089         struct drm_i915_private *i915 = tc_to_i915(tc);
1090         struct intel_digital_port *dig_port = tc->dig_port;
1091
1092         assert_tc_port_power_enabled(tc);
1093
1094         return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) &
1095                DDI_BUF_CTL_ENABLE;
1096 }
1097
1098 /**
1099  * intel_tc_port_init_mode: Read out HW state and init the given port's TypeC mode
1100  * @dig_port: digital port
1101  *
1102  * Read out the HW state and initialize the TypeC mode of @dig_port. The mode
1103  * will be locked until intel_tc_port_sanitize_mode() is called.
1104  */
1105 void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
1106 {
1107         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1108         struct intel_tc_port *tc = to_tc_port(dig_port);
1109         bool update_mode = false;
1110
1111         mutex_lock(&tc->lock);
1112
1113         drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);
1114         drm_WARN_ON(&i915->drm, tc->lock_wakeref);
1115         drm_WARN_ON(&i915->drm, tc->link_refcount);
1116
1117         tc_phy_get_hw_state(tc);
1118         /*
1119          * Save the initial mode for the state check in
1120          * intel_tc_port_sanitize_mode().
1121          */
1122         tc->init_mode = tc->mode;
1123
1124         /*
1125          * The PHY needs to be connected for AUX to work during HW readout and
1126          * MST topology resume, but the PHY mode can only be changed if the
1127          * port is disabled.
1128          *
1129          * An exception is the case where BIOS leaves the PHY incorrectly
1130          * disconnected on an enabled legacy port. Work around that by
1131          * connecting the PHY even though the port is enabled. This doesn't
1132          * cause a problem as the PHY ownership state is ignored by the
1133          * IOM/TCSS firmware (only display can own the PHY in that case).
1134          */
1135         if (!tc_port_is_enabled(tc)) {
1136                 update_mode = true;
1137         } else if (tc->mode == TC_PORT_DISCONNECTED) {
1138                 drm_WARN_ON(&i915->drm, !tc->legacy_port);
1139                 drm_err(&i915->drm,
1140                         "Port %s: PHY disconnected on enabled port, connecting it\n",
1141                         tc->port_name);
1142                 update_mode = true;
1143         }
1144
1145         if (update_mode)
1146                 intel_tc_port_update_mode(tc, 1, false);
1147
1148         /* Prevent changing tc->mode until intel_tc_port_sanitize_mode() is called. */
1149         __intel_tc_port_get_link(tc);
1150
1151         mutex_unlock(&tc->lock);
1152 }
1153
1154 static bool tc_port_has_active_links(struct intel_tc_port *tc,
1155                                      const struct intel_crtc_state *crtc_state)
1156 {
1157         struct drm_i915_private *i915 = tc_to_i915(tc);
1158         struct intel_digital_port *dig_port = tc->dig_port;
1159         enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT;
1160         int active_links = 0;
1161
1162         if (dig_port->dp.is_mst) {
1163                 /* TODO: get the PLL type for MST, once HW readout is done for it. */
1164                 active_links = intel_dp_mst_encoder_active_links(dig_port);
1165         } else if (crtc_state && crtc_state->hw.active) {
1166                 pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state);
1167                 active_links = 1;
1168         }
1169
1170         if (active_links && !tc_phy_is_connected(tc, pll_type))
1171                 drm_err(&i915->drm,
1172                         "Port %s: PHY disconnected with %d active link(s)\n",
1173                         tc->port_name, active_links);
1174
1175         return active_links;
1176 }
1177
1178 /**
1179  * intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode
1180  * @dig_port: digital port
1181  * @crtc_state: atomic state of CRTC connected to @dig_port
1182  *
1183  * Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver
1184  * loading and system resume:
1185  * If the encoder is enabled keep the TypeC mode/PHY connected state locked until
1186  * the encoder is disabled.
1187  * If the encoder is disabled make sure the PHY is disconnected.
1188  * @crtc_state is valid if @dig_port is enabled, NULL otherwise.
1189  */
1190 void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
1191                                  const struct intel_crtc_state *crtc_state)
1192 {
1193         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1194         struct intel_tc_port *tc = to_tc_port(dig_port);
1195
1196         mutex_lock(&tc->lock);
1197
1198         drm_WARN_ON(&i915->drm, tc->link_refcount != 1);
1199         if (!tc_port_has_active_links(tc, crtc_state)) {
1200                 /*
1201                  * TBT-alt is the default mode whenever the PHY ownership is not
1202                  * held (regardless of the sink's connected live state), so
1203                  * we'll just switch to disconnected mode from it here without
1204                  * a note.
1205                  */
1206                 if (tc->init_mode != TC_PORT_TBT_ALT &&
1207                     tc->init_mode != TC_PORT_DISCONNECTED)
1208                         drm_dbg_kms(&i915->drm,
1209                                     "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
1210                                     tc->port_name,
1211                                     tc_port_mode_name(tc->init_mode));
1212                 tc_phy_disconnect(tc);
1213                 __intel_tc_port_put_link(tc);
1214         }
1215
1216         drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
1217                     tc->port_name,
1218                     tc_port_mode_name(tc->mode));
1219
1220         mutex_unlock(&tc->lock);
1221 }
1222
1223 /*
1224  * The type-C ports are different because even when they are connected, they may
1225  * not be available/usable by the graphics driver: see the comment on
1226  * icl_tc_phy_connect(). So in our driver, instead of adding the additional
1227  * concept of "usable" and making everything check for "connected and usable", we
1228  * define a port as "connected" when it is not only connected, but also when it
1229  * is usable by the rest of the driver. That maintains the old assumption that
1230  * connected ports are usable, and avoids exposing to the users objects they
1231  * can't really use.
1232  */
1233 bool intel_tc_port_connected_locked(struct intel_encoder *encoder)
1234 {
1235         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1236         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1237         struct intel_tc_port *tc = to_tc_port(dig_port);
1238         u32 mask = ~0;
1239
1240         drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port));
1241
1242         if (tc->mode != TC_PORT_DISCONNECTED)
1243                 mask = BIT(tc->mode);
1244
1245         return tc_phy_hpd_live_status(tc) & mask;
1246 }
1247
1248 bool intel_tc_port_connected(struct intel_encoder *encoder)
1249 {
1250         struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1251         struct intel_tc_port *tc = to_tc_port(dig_port);
1252         bool is_connected;
1253
1254         mutex_lock(&tc->lock);
1255         is_connected = intel_tc_port_connected_locked(encoder);
1256         mutex_unlock(&tc->lock);
1257
1258         return is_connected;
1259 }
1260
1261 static void __intel_tc_port_lock(struct intel_tc_port *tc,
1262                                  int required_lanes)
1263 {
1264         struct drm_i915_private *i915 = tc_to_i915(tc);
1265
1266         mutex_lock(&tc->lock);
1267
1268         cancel_delayed_work(&tc->disconnect_phy_work);
1269
1270         if (!tc->link_refcount)
1271                 intel_tc_port_update_mode(tc, required_lanes,
1272                                           false);
1273
1274         drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_DISCONNECTED);
1275         drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_TBT_ALT &&
1276                                 !tc_phy_is_owned(tc));
1277 }
1278
1279 void intel_tc_port_lock(struct intel_digital_port *dig_port)
1280 {
1281         __intel_tc_port_lock(to_tc_port(dig_port), 1);
1282 }
1283
1284 /**
1285  * intel_tc_port_disconnect_phy_work: disconnect TypeC PHY from display port
1286  * @work: the &struct intel_tc_port.disconnect_phy_work work item
1287  *
1288  * Disconnect the given digital port from its TypeC PHY (handing back the
1289  * control of the PHY to the TypeC subsystem). This happens in a delayed
1290  * manner after each AUX transaction and modeset disable.
1291  */
1292 static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
1293 {
1294         struct intel_tc_port *tc =
1295                 container_of(work, struct intel_tc_port, disconnect_phy_work.work);
1296
1297         mutex_lock(&tc->lock);
1298
1299         if (!tc->link_refcount)
1300                 intel_tc_port_update_mode(tc, 1, true);
1301
1302         mutex_unlock(&tc->lock);
1303 }
1304
1305 /**
1306  * intel_tc_port_flush_work: flush the work disconnecting the PHY
1307  * @dig_port: digital port
1308  *
1309  * Flush the delayed work disconnecting an idle PHY.
1310  */
1311 void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
1312 {
1313         flush_delayed_work(&to_tc_port(dig_port)->disconnect_phy_work);
1314 }
1315
1316 void intel_tc_port_unlock(struct intel_digital_port *dig_port)
1317 {
1318         struct intel_tc_port *tc = to_tc_port(dig_port);
1319
1320         if (!tc->link_refcount && tc->mode != TC_PORT_DISCONNECTED)
1321                 queue_delayed_work(system_unbound_wq, &tc->disconnect_phy_work,
1322                                    msecs_to_jiffies(1000));
1323
1324         mutex_unlock(&tc->lock);
1325 }
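/*
 * An illustrative sketch of how the locking API above is meant to be used
 * (not lifted from an actual call site): short accesses to the port's
 * AUX/PHY take the TC lock for their duration,
 *
 *	intel_tc_port_lock(dig_port);
 *	... AUX transfer / PHY or FIA register access ...
 *	intel_tc_port_unlock(dig_port);
 *
 * while enabling a link holds a longer-term reference via
 * intel_tc_port_get_link()/intel_tc_port_put_link(), preventing the delayed
 * PHY disconnect in between.
 */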
1326
1327 bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
1328 {
1329         struct intel_tc_port *tc = to_tc_port(dig_port);
1330
1331         return mutex_is_locked(&tc->lock) ||
1332                tc->link_refcount;
1333 }
1334
1335 void intel_tc_port_get_link(struct intel_digital_port *dig_port,
1336                             int required_lanes)
1337 {
1338         struct intel_tc_port *tc = to_tc_port(dig_port);
1339
1340         __intel_tc_port_lock(tc, required_lanes);
1341         __intel_tc_port_get_link(tc);
1342         intel_tc_port_unlock(dig_port);
1343 }
1344
1345 void intel_tc_port_put_link(struct intel_digital_port *dig_port)
1346 {
1347         struct intel_tc_port *tc = to_tc_port(dig_port);
1348
1349         intel_tc_port_lock(dig_port);
1350         __intel_tc_port_put_link(tc);
1351         intel_tc_port_unlock(dig_port);
1352
1353         /*
1354          * Disconnecting the PHY after the PHY's PLL gets disabled may
1355          * hang the system on ADL-P, so disconnect the PHY here synchronously.
1356          * TODO: remove this once the root cause of the ordering requirement
1357          * is found/fixed.
1358          */
1359         intel_tc_port_flush_work(dig_port);
1360 }
1361
1362 int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
1363 {
1364         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1365         struct intel_tc_port *tc;
1366         enum port port = dig_port->base.port;
1367         enum tc_port tc_port = intel_port_to_tc(i915, port);
1368
1369         if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
1370                 return -EINVAL;
1371
1372         tc = kzalloc(sizeof(*tc), GFP_KERNEL);
1373         if (!tc)
1374                 return -ENOMEM;
1375
1376         dig_port->tc = tc;
1377         tc->dig_port = dig_port;
1378
1379         if (DISPLAY_VER(i915) >= 13)
1380                 tc->phy_ops = &adlp_tc_phy_ops;
1381         else if (DISPLAY_VER(i915) >= 12)
1382                 tc->phy_ops = &tgl_tc_phy_ops;
1383         else
1384                 tc->phy_ops = &icl_tc_phy_ops;
1385
1386         snprintf(tc->port_name, sizeof(tc->port_name),
1387                  "%c/TC#%d", port_name(port), tc_port + 1);
1388
1389         mutex_init(&tc->lock);
1390         INIT_DELAYED_WORK(&tc->disconnect_phy_work, intel_tc_port_disconnect_phy_work);
1391         tc->legacy_port = is_legacy;
1392         tc->mode = TC_PORT_DISCONNECTED;
1393         tc->link_refcount = 0;
1394
1395         tc_phy_init(tc);
1396
1397         intel_tc_port_init_mode(dig_port);
1398
1399         return 0;
1400 }
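/*
 * An illustrative sketch only, not lifted from an actual call site: a DDI
 * encoder on a TypeC-capable PHY would call the init function above during
 * its setup and intel_tc_port_cleanup() below on teardown, e.g.
 *
 *	if (intel_phy_is_tc(i915, phy) &&
 *	    intel_tc_port_init(dig_port, is_legacy) < 0)
 *		goto err;
 *
 * with is_legacy standing for the VBT-derived legacy port flag.
 */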
1401
1402 void intel_tc_port_cleanup(struct intel_digital_port *dig_port)
1403 {
1404         intel_tc_port_flush_work(dig_port);
1405
1406         kfree(dig_port->tc);
1407         dig_port->tc = NULL;
1408 }