1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5
6 #include "display/intel_crt.h"
7 #include "display/intel_dp.h"
8
9 #include "i915_drv.h"
10 #include "i915_irq.h"
11 #include "intel_cdclk.h"
12 #include "intel_combo_phy.h"
13 #include "intel_csr.h"
14 #include "intel_display_power.h"
15 #include "intel_display_types.h"
16 #include "intel_dpio_phy.h"
17 #include "intel_hotplug.h"
18 #include "intel_sideband.h"
19 #include "intel_tc.h"
20 #include "intel_vga.h"
21
22 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
23                                          enum i915_power_well_id power_well_id);
24
25 const char *
26 intel_display_power_domain_str(enum intel_display_power_domain domain)
27 {
28         switch (domain) {
29         case POWER_DOMAIN_DISPLAY_CORE:
30                 return "DISPLAY_CORE";
31         case POWER_DOMAIN_PIPE_A:
32                 return "PIPE_A";
33         case POWER_DOMAIN_PIPE_B:
34                 return "PIPE_B";
35         case POWER_DOMAIN_PIPE_C:
36                 return "PIPE_C";
37         case POWER_DOMAIN_PIPE_D:
38                 return "PIPE_D";
39         case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
40                 return "PIPE_A_PANEL_FITTER";
41         case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
42                 return "PIPE_B_PANEL_FITTER";
43         case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
44                 return "PIPE_C_PANEL_FITTER";
45         case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
46                 return "PIPE_D_PANEL_FITTER";
47         case POWER_DOMAIN_TRANSCODER_A:
48                 return "TRANSCODER_A";
49         case POWER_DOMAIN_TRANSCODER_B:
50                 return "TRANSCODER_B";
51         case POWER_DOMAIN_TRANSCODER_C:
52                 return "TRANSCODER_C";
53         case POWER_DOMAIN_TRANSCODER_D:
54                 return "TRANSCODER_D";
55         case POWER_DOMAIN_TRANSCODER_EDP:
56                 return "TRANSCODER_EDP";
57         case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
58                 return "TRANSCODER_VDSC_PW2";
59         case POWER_DOMAIN_TRANSCODER_DSI_A:
60                 return "TRANSCODER_DSI_A";
61         case POWER_DOMAIN_TRANSCODER_DSI_C:
62                 return "TRANSCODER_DSI_C";
63         case POWER_DOMAIN_PORT_DDI_A_LANES:
64                 return "PORT_DDI_A_LANES";
65         case POWER_DOMAIN_PORT_DDI_B_LANES:
66                 return "PORT_DDI_B_LANES";
67         case POWER_DOMAIN_PORT_DDI_C_LANES:
68                 return "PORT_DDI_C_LANES";
69         case POWER_DOMAIN_PORT_DDI_D_LANES:
70                 return "PORT_DDI_D_LANES";
71         case POWER_DOMAIN_PORT_DDI_E_LANES:
72                 return "PORT_DDI_E_LANES";
73         case POWER_DOMAIN_PORT_DDI_F_LANES:
74                 return "PORT_DDI_F_LANES";
75         case POWER_DOMAIN_PORT_DDI_G_LANES:
76                 return "PORT_DDI_G_LANES";
77         case POWER_DOMAIN_PORT_DDI_H_LANES:
78                 return "PORT_DDI_H_LANES";
79         case POWER_DOMAIN_PORT_DDI_I_LANES:
80                 return "PORT_DDI_I_LANES";
81         case POWER_DOMAIN_PORT_DDI_A_IO:
82                 return "PORT_DDI_A_IO";
83         case POWER_DOMAIN_PORT_DDI_B_IO:
84                 return "PORT_DDI_B_IO";
85         case POWER_DOMAIN_PORT_DDI_C_IO:
86                 return "PORT_DDI_C_IO";
87         case POWER_DOMAIN_PORT_DDI_D_IO:
88                 return "PORT_DDI_D_IO";
89         case POWER_DOMAIN_PORT_DDI_E_IO:
90                 return "PORT_DDI_E_IO";
91         case POWER_DOMAIN_PORT_DDI_F_IO:
92                 return "PORT_DDI_F_IO";
93         case POWER_DOMAIN_PORT_DDI_G_IO:
94                 return "PORT_DDI_G_IO";
95         case POWER_DOMAIN_PORT_DDI_H_IO:
96                 return "PORT_DDI_H_IO";
97         case POWER_DOMAIN_PORT_DDI_I_IO:
98                 return "PORT_DDI_I_IO";
99         case POWER_DOMAIN_PORT_DSI:
100                 return "PORT_DSI";
101         case POWER_DOMAIN_PORT_CRT:
102                 return "PORT_CRT";
103         case POWER_DOMAIN_PORT_OTHER:
104                 return "PORT_OTHER";
105         case POWER_DOMAIN_VGA:
106                 return "VGA";
107         case POWER_DOMAIN_AUDIO:
108                 return "AUDIO";
109         case POWER_DOMAIN_AUX_A:
110                 return "AUX_A";
111         case POWER_DOMAIN_AUX_B:
112                 return "AUX_B";
113         case POWER_DOMAIN_AUX_C:
114                 return "AUX_C";
115         case POWER_DOMAIN_AUX_D:
116                 return "AUX_D";
117         case POWER_DOMAIN_AUX_E:
118                 return "AUX_E";
119         case POWER_DOMAIN_AUX_F:
120                 return "AUX_F";
121         case POWER_DOMAIN_AUX_G:
122                 return "AUX_G";
123         case POWER_DOMAIN_AUX_H:
124                 return "AUX_H";
125         case POWER_DOMAIN_AUX_I:
126                 return "AUX_I";
127         case POWER_DOMAIN_AUX_IO_A:
128                 return "AUX_IO_A";
129         case POWER_DOMAIN_AUX_C_TBT:
130                 return "AUX_C_TBT";
131         case POWER_DOMAIN_AUX_D_TBT:
132                 return "AUX_D_TBT";
133         case POWER_DOMAIN_AUX_E_TBT:
134                 return "AUX_E_TBT";
135         case POWER_DOMAIN_AUX_F_TBT:
136                 return "AUX_F_TBT";
137         case POWER_DOMAIN_AUX_G_TBT:
138                 return "AUX_G_TBT";
139         case POWER_DOMAIN_AUX_H_TBT:
140                 return "AUX_H_TBT";
141         case POWER_DOMAIN_AUX_I_TBT:
142                 return "AUX_I_TBT";
143         case POWER_DOMAIN_GMBUS:
144                 return "GMBUS";
145         case POWER_DOMAIN_INIT:
146                 return "INIT";
147         case POWER_DOMAIN_MODESET:
148                 return "MODESET";
149         case POWER_DOMAIN_GT_IRQ:
150                 return "GT_IRQ";
151         case POWER_DOMAIN_DPLL_DC_OFF:
152                 return "DPLL_DC_OFF";
153         default:
154                 MISSING_CASE(domain);
155                 return "?";
156         }
157 }
158
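/*
 * Illustrative sketch, not part of this file: the helper above is meant for
 * diagnostics, e.g. naming the domain in a debug message. The print below is
 * a hypothetical usage example only:
 *
 *	enum intel_display_power_domain domain = POWER_DOMAIN_PIPE_A;
 *
 *	DRM_DEBUG_KMS("handling power domain %s\n",
 *		      intel_display_power_domain_str(domain));
 */
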
159 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
160                                     struct i915_power_well *power_well)
161 {
162         DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
163         power_well->desc->ops->enable(dev_priv, power_well);
164         power_well->hw_enabled = true;
165 }
166
167 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
168                                      struct i915_power_well *power_well)
169 {
170         DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
171         power_well->hw_enabled = false;
172         power_well->desc->ops->disable(dev_priv, power_well);
173 }
174
175 static void intel_power_well_get(struct drm_i915_private *dev_priv,
176                                  struct i915_power_well *power_well)
177 {
178         if (!power_well->count++)
179                 intel_power_well_enable(dev_priv, power_well);
180 }
181
182 static void intel_power_well_put(struct drm_i915_private *dev_priv,
183                                  struct i915_power_well *power_well)
184 {
185         WARN(!power_well->count, "Use count on power well %s is already zero",
186              power_well->desc->name);
187
188         if (!--power_well->count)
189                 intel_power_well_disable(dev_priv, power_well);
190 }
191
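/*
 * Illustrative sketch, not part of this file: the get/put helpers above are a
 * plain reference count, so only the first get touches the hardware to enable
 * the well and only the last put disables it. Assuming a well that starts
 * with a zero count:
 *
 *	intel_power_well_get(dev_priv, power_well);	// count 0->1, enables HW
 *	intel_power_well_get(dev_priv, power_well);	// count 1->2, no HW access
 *	intel_power_well_put(dev_priv, power_well);	// count 2->1, no HW access
 *	intel_power_well_put(dev_priv, power_well);	// count 1->0, disables HW
 */
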
192 /**
193  * __intel_display_power_is_enabled - unlocked check for a power domain
194  * @dev_priv: i915 device instance
195  * @domain: power domain to check
196  *
197  * This is the unlocked version of intel_display_power_is_enabled() and should
198  * only be used from error capture and recovery code where deadlocks are
199  * possible.
200  *
201  * Returns:
202  * True when the power domain is enabled, false otherwise.
203  */
204 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
205                                       enum intel_display_power_domain domain)
206 {
207         struct i915_power_well *power_well;
208         bool is_enabled;
209
210         if (dev_priv->runtime_pm.suspended)
211                 return false;
212
213         is_enabled = true;
214
215         for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
216                 if (power_well->desc->always_on)
217                         continue;
218
219                 if (!power_well->hw_enabled) {
220                         is_enabled = false;
221                         break;
222                 }
223         }
224
225         return is_enabled;
226 }
227
228 /**
229  * intel_display_power_is_enabled - check for a power domain
230  * @dev_priv: i915 device instance
231  * @domain: power domain to check
232  *
233  * This function can be used to check the hw power domain state. It is mostly
234  * used in hardware state readout functions. Everywhere else code should rely
235  * upon explicit power domain reference counting to ensure that the hardware
236  * block is powered up before accessing it.
237  *
238  * Callers must hold the relevant modesetting locks to ensure that concurrent
239  * threads can't disable the power well while the caller tries to read a few
240  * registers.
241  *
242  * Returns:
243  * True when the power domain is enabled, false otherwise.
244  */
245 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
246                                     enum intel_display_power_domain domain)
247 {
248         struct i915_power_domains *power_domains;
249         bool ret;
250
251         power_domains = &dev_priv->power_domains;
252
253         mutex_lock(&power_domains->lock);
254         ret = __intel_display_power_is_enabled(dev_priv, domain);
255         mutex_unlock(&power_domains->lock);
256
257         return ret;
258 }
259
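/*
 * Illustrative sketch, not part of this file: outside of HW state readout,
 * callers are expected to hold an explicit power reference around register
 * access instead of checking intel_display_power_is_enabled(). A hypothetical
 * read of a pipe register with the wakeref-tracking API would look roughly
 * like this:
 *
 *	intel_wakeref_t wakeref;
 *	u32 val;
 *
 *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	val = I915_READ(PIPECONF(PIPE_A));
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 */
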
260 /*
261  * Starting with Haswell, we have a "Power Down Well" that can be turned off
262  * when not needed anymore. We have 4 registers that can request the power well
263  * to be enabled, and it will only be disabled if none of the registers is
264  * requesting it to be enabled.
265  */
266 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
267                                        u8 irq_pipe_mask, bool has_vga)
268 {
269         if (has_vga)
270                 intel_vga_reset_io_mem(dev_priv);
271
272         if (irq_pipe_mask)
273                 gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
274 }
275
276 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
277                                        u8 irq_pipe_mask)
278 {
279         if (irq_pipe_mask)
280                 gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
281 }
282
283 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
284                                            struct i915_power_well *power_well)
285 {
286         const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
287         int pw_idx = power_well->desc->hsw.idx;
288
289         /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
290         if (intel_de_wait_for_set(dev_priv, regs->driver,
291                                   HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
292                 DRM_DEBUG_KMS("%s power well enable timeout\n",
293                               power_well->desc->name);
294
295                 /* An AUX timeout is expected if the TBT DP tunnel is down. */
296                 WARN_ON(!power_well->desc->hsw.is_tc_tbt);
297         }
298 }
299
300 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
301                                      const struct i915_power_well_regs *regs,
302                                      int pw_idx)
303 {
304         u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
305         u32 ret;
306
307         ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
308         ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
309         if (regs->kvmr.reg)
310                 ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
311         ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
312
313         return ret;
314 }
315
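/*
 * Worked example, not part of this file: the value returned above packs one
 * bit per request register, matching the debug print in
 * hsw_wait_for_power_well_disable():
 *
 *	bit 0 (0x1) - BIOS request register
 *	bit 1 (0x2) - driver request register
 *	bit 2 (0x4) - KVMR request register (when present)
 *	bit 3 (0x8) - DEBUG request register
 *
 * so e.g. a return value of 0x9 means the BIOS and DEBUG registers are still
 * requesting the power well to be enabled.
 */
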
316 static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
317                                             struct i915_power_well *power_well)
318 {
319         const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
320         int pw_idx = power_well->desc->hsw.idx;
321         bool disabled;
322         u32 reqs;
323
324         /*
325          * Bspec doesn't require waiting for PWs to get disabled, but still do
326          * this for paranoia. The known cases where a PW will be forced on:
327          * - a KVMR request on any power well via the KVMR request register
328          * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
329          *   DEBUG request registers
330          * Skip the wait in case any of the request bits are set and print a
331          * diagnostic message.
332          */
333         wait_for((disabled = !(I915_READ(regs->driver) &
334                                HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
335                  (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
336         if (disabled)
337                 return;
338
339         DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
340                       power_well->desc->name,
341                       !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
342 }
343
344 static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
345                                            enum skl_power_gate pg)
346 {
347         /* Timeout 5us for PG#0, for other PGs 1us */
348         WARN_ON(intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
349                                       SKL_FUSE_PG_DIST_STATUS(pg), 1));
350 }
351
352 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
353                                   struct i915_power_well *power_well)
354 {
355         const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
356         int pw_idx = power_well->desc->hsw.idx;
357         bool wait_fuses = power_well->desc->hsw.has_fuses;
358         enum skl_power_gate uninitialized_var(pg);
359         u32 val;
360
361         if (wait_fuses) {
362                 pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
363                                                  SKL_PW_CTL_IDX_TO_PG(pw_idx);
364                 /*
365                  * For PW1 we have to wait both for the PW0/PG0 fuse state
366                  * before enabling the power well and PW1/PG1's own fuse
367                  * state after the enabling. For all other power wells with
368                  * fuses we only have to wait for that PW/PG's fuse state
369                  * after the enabling.
370                  */
371                 if (pg == SKL_PG1)
372                         gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
373         }
374
375         val = I915_READ(regs->driver);
376         I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
377         hsw_wait_for_power_well_enable(dev_priv, power_well);
378
379         /* Display WA #1178: cnl */
380         if (IS_CANNONLAKE(dev_priv) &&
381             pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
382             pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
383                 val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
384                 val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
385                 I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
386         }
387
388         if (wait_fuses)
389                 gen9_wait_for_power_well_fuses(dev_priv, pg);
390
391         hsw_power_well_post_enable(dev_priv,
392                                    power_well->desc->hsw.irq_pipe_mask,
393                                    power_well->desc->hsw.has_vga);
394 }
395
396 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
397                                    struct i915_power_well *power_well)
398 {
399         const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
400         int pw_idx = power_well->desc->hsw.idx;
401         u32 val;
402
403         hsw_power_well_pre_disable(dev_priv,
404                                    power_well->desc->hsw.irq_pipe_mask);
405
406         val = I915_READ(regs->driver);
407         I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
408         hsw_wait_for_power_well_disable(dev_priv, power_well);
409 }
410
411 #define ICL_AUX_PW_TO_PHY(pw_idx)       ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
412
413 static void
414 icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
415                                     struct i915_power_well *power_well)
416 {
417         const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
418         int pw_idx = power_well->desc->hsw.idx;
419         enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
420         u32 val;
421         int wa_idx_max;
422
423         val = I915_READ(regs->driver);
424         I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
425
426         if (INTEL_GEN(dev_priv) < 12) {
427                 val = I915_READ(ICL_PORT_CL_DW12(phy));
428                 I915_WRITE(ICL_PORT_CL_DW12(phy), val | ICL_LANE_ENABLE_AUX);
429         }
430
431         hsw_wait_for_power_well_enable(dev_priv, power_well);
432
433         /* Display WA #1178: icl, tgl */
434         if (IS_TIGERLAKE(dev_priv))
435                 wa_idx_max = ICL_PW_CTL_IDX_AUX_C;
436         else
437                 wa_idx_max = ICL_PW_CTL_IDX_AUX_B;
438
439         if (!IS_ELKHARTLAKE(dev_priv) &&
440             pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= wa_idx_max &&
441             !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
442                 val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
443                 val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
444                 I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
445         }
446 }
447
448 static void
449 icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
450                                      struct i915_power_well *power_well)
451 {
452         const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
453         int pw_idx = power_well->desc->hsw.idx;
454         enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
455         u32 val;
456
457         if (INTEL_GEN(dev_priv) < 12) {
458                 val = I915_READ(ICL_PORT_CL_DW12(phy));
459                 I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX);
460         }
461
462         val = I915_READ(regs->driver);
463         I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
464
465         hsw_wait_for_power_well_disable(dev_priv, power_well);
466 }
467
468 #define ICL_AUX_PW_TO_CH(pw_idx)        \
469         ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
470
471 #define ICL_TBT_AUX_PW_TO_CH(pw_idx)    \
472         ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
473
474 static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
475                                      struct i915_power_well *power_well)
476 {
477         int pw_idx = power_well->desc->hsw.idx;
478
479         return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
480                                                  ICL_AUX_PW_TO_CH(pw_idx);
481 }
482
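/*
 * Worked example, not part of this file: the two macros above map a power
 * well index to an AUX channel by a fixed offset, so for instance:
 *
 *	ICL_AUX_PW_TO_CH(ICL_PW_CTL_IDX_AUX_B)        == AUX_CH_B
 *	ICL_TBT_AUX_PW_TO_CH(ICL_PW_CTL_IDX_AUX_TBT1) == AUX_CH_C
 *
 * i.e. the combo PHY AUX power wells start at AUX_CH_A and the TBT AUX power
 * wells start at AUX_CH_C.
 */
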
483 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
484
485 static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
486
487 static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
488                                       struct i915_power_well *power_well)
489 {
490         int refs = hweight64(power_well->desc->domains &
491                              async_put_domains_mask(&dev_priv->power_domains));
492
493         WARN_ON(refs > power_well->count);
494
495         return refs;
496 }
497
498 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
499                                         struct i915_power_well *power_well)
500 {
501         enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
502         struct intel_digital_port *dig_port = NULL;
503         struct intel_encoder *encoder;
504
505         /* Bypass the check if all references are released asynchronously */
506         if (power_well_async_ref_count(dev_priv, power_well) ==
507             power_well->count)
508                 return;
509
510         aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
511
512         for_each_intel_encoder(&dev_priv->drm, encoder) {
513                 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
514
515                 if (!intel_phy_is_tc(dev_priv, phy))
516                         continue;
517
518                 /* We'll check the MST primary port */
519                 if (encoder->type == INTEL_OUTPUT_DP_MST)
520                         continue;
521
522                 dig_port = enc_to_dig_port(&encoder->base);
523                 if (WARN_ON(!dig_port))
524                         continue;
525
526                 if (dig_port->aux_ch != aux_ch) {
527                         dig_port = NULL;
528                         continue;
529                 }
530
531                 break;
532         }
533
534         if (WARN_ON(!dig_port))
535                 return;
536
537         WARN_ON(!intel_tc_port_ref_held(dig_port));
538 }
539
540 #else
541
542 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
543                                         struct i915_power_well *power_well)
544 {
545 }
546
547 #endif
548
549 #define TGL_AUX_PW_TO_TC_PORT(pw_idx)   ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
550
551 static void
552 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
553                                  struct i915_power_well *power_well)
554 {
555         enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
556         u32 val;
557
558         icl_tc_port_assert_ref_held(dev_priv, power_well);
559
560         val = I915_READ(DP_AUX_CH_CTL(aux_ch));
561         val &= ~DP_AUX_CH_CTL_TBT_IO;
562         if (power_well->desc->hsw.is_tc_tbt)
563                 val |= DP_AUX_CH_CTL_TBT_IO;
564         I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
565
566         hsw_power_well_enable(dev_priv, power_well);
567
568         if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) {
569                 enum tc_port tc_port;
570
571                 tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
572                 I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
573
574                 if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
575                                           DKL_CMN_UC_DW27_UC_HEALTH, 1))
576                         DRM_WARN("Timeout waiting TC uC health\n");
577         }
578 }
579
580 static void
581 icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
582                                   struct i915_power_well *power_well)
583 {
584         icl_tc_port_assert_ref_held(dev_priv, power_well);
585
586         hsw_power_well_disable(dev_priv, power_well);
587 }
588
589 /*
590  * We should only use the power well if we explicitly asked the hardware to
591  * enable it, so check if it's enabled and also check if we've requested it to
592  * be enabled.
593  */
594 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
595                                    struct i915_power_well *power_well)
596 {
597         const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
598         enum i915_power_well_id id = power_well->desc->id;
599         int pw_idx = power_well->desc->hsw.idx;
600         u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
601                    HSW_PWR_WELL_CTL_STATE(pw_idx);
602         u32 val;
603
604         val = I915_READ(regs->driver);
605
606         /*
607          * On GEN9 big core due to a DMC bug the driver's request bits for PW1
 608          * and the MISC_IO PW will not be restored, so check instead for the
609          * BIOS's own request bits, which are forced-on for these power wells
610          * when exiting DC5/6.
611          */
612         if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
613             (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
614                 val |= I915_READ(regs->bios);
615
616         return (val & mask) == mask;
617 }
618
619 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
620 {
621         WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
622                   "DC9 already programmed to be enabled.\n");
623         WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
624                   "DC5 still not disabled to enable DC9.\n");
625         WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
626                   HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
627                   "Power well 2 on.\n");
628         WARN_ONCE(intel_irqs_enabled(dev_priv),
629                   "Interrupts not disabled yet.\n");
630
631          /*
632           * TODO: check for the following to verify the conditions to enter DC9
633           * state are satisfied:
 634           * 1] Check relevant display engine registers to verify if the
 635           * modeset disable sequence was followed.
 636           * 2] Check if the display uninitialize sequence has been initiated.
637           */
638 }
639
640 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
641 {
642         WARN_ONCE(intel_irqs_enabled(dev_priv),
643                   "Interrupts not disabled yet.\n");
644         WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
645                   "DC5 still not disabled.\n");
646
647          /*
648           * TODO: check for the following to verify DC9 state was indeed
649           * entered before programming to disable it:
 650           * 1] Check relevant display engine registers to verify if the
 651           * modeset disable sequence was followed.
 652           * 2] Check if the display uninitialize sequence has been initiated.
653           */
654 }
655
656 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
657                                 u32 state)
658 {
659         int rewrites = 0;
660         int rereads = 0;
661         u32 v;
662
663         I915_WRITE(DC_STATE_EN, state);
664
 665         /* It has been observed that disabling the DC6 state sometimes
 666          * doesn't stick and the DMC keeps returning the old value. Make sure
 667          * the readback matches what we wrote enough times in a row, rewriting
 668          * as needed, until we are confident the state is exactly what we want.
 669          */
670         do  {
671                 v = I915_READ(DC_STATE_EN);
672
673                 if (v != state) {
674                         I915_WRITE(DC_STATE_EN, state);
675                         rewrites++;
676                         rereads = 0;
677                 } else if (rereads++ > 5) {
678                         break;
679                 }
680
681         } while (rewrites < 100);
682
683         if (v != state)
684                 DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
685                           state, v);
686
 687         /* Most of the time a single retry is enough; avoid spamming the log */
688         if (rewrites > 1)
689                 DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
690                               state, rewrites);
691 }
692
693 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
694 {
695         u32 mask;
696
697         mask = DC_STATE_EN_UPTO_DC5;
698
699         if (INTEL_GEN(dev_priv) >= 12)
700                 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
701                                           | DC_STATE_EN_DC9;
702         else if (IS_GEN(dev_priv, 11))
703                 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
704         else if (IS_GEN9_LP(dev_priv))
705                 mask |= DC_STATE_EN_DC9;
706         else
707                 mask |= DC_STATE_EN_UPTO_DC6;
708
709         return mask;
710 }
711
712 static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
713 {
714         u32 val;
715
716         val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
717
718         DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
719                       dev_priv->csr.dc_state, val);
720         dev_priv->csr.dc_state = val;
721 }
722
723 /**
724  * gen9_set_dc_state - set target display C power state
725  * @dev_priv: i915 device instance
726  * @state: target DC power state
727  * - DC_STATE_DISABLE
728  * - DC_STATE_EN_UPTO_DC5
729  * - DC_STATE_EN_UPTO_DC6
730  * - DC_STATE_EN_DC9
731  *
732  * Signal to DMC firmware/HW the target DC power state passed in @state.
 733  * DMC/HW can turn off individual display clocks and power rails when entering
 734  * a deeper DC power state (higher in number) and turns these back on when
 735  * exiting that state to a shallower power state (lower in number). The HW will
 736  * decide when to actually enter a given state on an on-demand basis, for
 737  * instance depending on the active state of display pipes. The state of the
 738  * display registers backed by affected power rails is saved/restored as needed.
 739  *
 740  * Based on the above, enabling a deeper DC power state is asynchronous wrt. the
 741  * HW actually entering it. Disabling a deeper power state is synchronous: for
 742  * instance, setting %DC_STATE_DISABLE won't complete until all HW resources are
 743  * turned back on and register state is restored. This is guaranteed by the MMIO
 744  * write to DC_STATE_EN blocking until the state is restored.
 745  */
746 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
747 {
748         u32 val;
749         u32 mask;
750
751         if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
752                 state &= dev_priv->csr.allowed_dc_mask;
753
754         val = I915_READ(DC_STATE_EN);
755         mask = gen9_dc_mask(dev_priv);
756         DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
757                       val & mask, state);
758
759         /* Check if DMC is ignoring our DC state requests */
760         if ((val & mask) != dev_priv->csr.dc_state)
761                 DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
762                           dev_priv->csr.dc_state, val & mask);
763
764         val &= ~mask;
765         val |= state;
766
767         gen9_write_dc_state(dev_priv, val);
768
769         dev_priv->csr.dc_state = val & mask;
770 }
771
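/*
 * Illustrative sketch, not part of this file: code that must keep DC states
 * disabled while touching registers the DMC does not save/restore normally
 * does so indirectly, by holding a reference on a power domain that maps to
 * the "DC off" power well rather than by calling gen9_set_dc_state()
 * directly. A hypothetical caller:
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
 *	// ... program registers not covered by DMC save/restore ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
 */
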
772 static u32
773 sanitize_target_dc_state(struct drm_i915_private *dev_priv,
774                          u32 target_dc_state)
775 {
776         u32 states[] = {
777                 DC_STATE_EN_UPTO_DC6,
778                 DC_STATE_EN_UPTO_DC5,
779                 DC_STATE_EN_DC3CO,
780                 DC_STATE_DISABLE,
781         };
782         int i;
783
784         for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
785                 if (target_dc_state != states[i])
786                         continue;
787
788                 if (dev_priv->csr.allowed_dc_mask & target_dc_state)
789                         break;
790
791                 target_dc_state = states[i + 1];
792         }
793
794         return target_dc_state;
795 }
796
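/*
 * Worked example, not part of this file: the function above walks the
 * states[] ladder from the requested state towards DC_STATE_DISABLE until it
 * finds one permitted by csr.allowed_dc_mask. For instance, requesting
 * DC_STATE_EN_UPTO_DC6 on a platform whose allowed mask only contains
 * DC_STATE_EN_UPTO_DC5 yields DC_STATE_EN_UPTO_DC5; if no state in the ladder
 * is allowed, the result falls through to DC_STATE_DISABLE.
 */
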
797 static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
798 {
799         DRM_DEBUG_KMS("Enabling DC3CO\n");
800         gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
801 }
802
803 static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
804 {
805         u32 val;
806
807         DRM_DEBUG_KMS("Disabling DC3CO\n");
808         val = I915_READ(DC_STATE_EN);
809         val &= ~DC_STATE_DC3CO_STATUS;
810         I915_WRITE(DC_STATE_EN, val);
811         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
812         /*
 813          * Wait for the DC3CO exit time of 200us per Bspec 49196.
814          */
815         usleep_range(200, 210);
816 }
817
818 static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
819 {
820         assert_can_enable_dc9(dev_priv);
821
822         DRM_DEBUG_KMS("Enabling DC9\n");
823         /*
824          * Power sequencer reset is not needed on
825          * platforms with South Display Engine on PCH,
826          * because PPS registers are always on.
827          */
828         if (!HAS_PCH_SPLIT(dev_priv))
829                 intel_power_sequencer_reset(dev_priv);
830         gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
831 }
832
833 static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
834 {
835         assert_can_disable_dc9(dev_priv);
836
837         DRM_DEBUG_KMS("Disabling DC9\n");
838
839         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
840
841         intel_pps_unlock_regs_wa(dev_priv);
842 }
843
844 static void assert_csr_loaded(struct drm_i915_private *dev_priv)
845 {
846         WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
847                   "CSR program storage start is NULL\n");
848         WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
849         WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
850 }
851
852 static struct i915_power_well *
853 lookup_power_well(struct drm_i915_private *dev_priv,
854                   enum i915_power_well_id power_well_id)
855 {
856         struct i915_power_well *power_well;
857
858         for_each_power_well(dev_priv, power_well)
859                 if (power_well->desc->id == power_well_id)
860                         return power_well;
861
862         /*
863          * It's not feasible to add error checking code to the callers since
864          * this condition really shouldn't happen and it doesn't even make sense
865          * to abort things like display initialization sequences. Just return
866          * the first power well and hope the WARN gets reported so we can fix
867          * our driver.
868          */
869         WARN(1, "Power well %d not defined for this platform\n", power_well_id);
870         return &dev_priv->power_domains.power_wells[0];
871 }
872
873 /**
874  * intel_display_power_set_target_dc_state - Set target dc state.
875  * @dev_priv: i915 device
876  * @state: state which needs to be set as target_dc_state.
877  *
878  * This function set the "DC off" power well target_dc_state,
879  * based upon this target_dc_stste, "DC off" power well will
880  * enable desired DC state.
881  */
882 void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
883                                              u32 state)
884 {
885         struct i915_power_well *power_well;
886         bool dc_off_enabled;
887         struct i915_power_domains *power_domains = &dev_priv->power_domains;
888
889         mutex_lock(&power_domains->lock);
890         power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
891
892         if (WARN_ON(!power_well))
893                 goto unlock;
894
895         state = sanitize_target_dc_state(dev_priv, state);
896
897         if (state == dev_priv->csr.target_dc_state)
898                 goto unlock;
899
900         dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
901                                                            power_well);
902         /*
 903          * If the DC off power well is currently disabled, toggle it on and
 904          * back off so that the new target DC state takes effect.
905          */
906         if (!dc_off_enabled)
907                 power_well->desc->ops->enable(dev_priv, power_well);
908
909         dev_priv->csr.target_dc_state = state;
910
911         if (!dc_off_enabled)
912                 power_well->desc->ops->disable(dev_priv, power_well);
913
914 unlock:
915         mutex_unlock(&power_domains->lock);
916 }
917
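/*
 * Illustrative sketch, not part of this file: a display feature that wants a
 * specific DC state while it is active (for instance DC3CO) would request it
 * through this helper and rely on sanitize_target_dc_state() to fall back to
 * a supported state; the surrounding feature code is an assumption here:
 *
 *	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
 *	// ... feature active ...
 *	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
 */
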
918 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
919 {
920         bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
921                                         SKL_DISP_PW_2);
922
923         WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
924
925         WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
926                   "DC5 already programmed to be enabled.\n");
927         assert_rpm_wakelock_held(&dev_priv->runtime_pm);
928
929         assert_csr_loaded(dev_priv);
930 }
931
932 static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
933 {
934         assert_can_enable_dc5(dev_priv);
935
936         DRM_DEBUG_KMS("Enabling DC5\n");
937
938         /* Wa Display #1183: skl,kbl,cfl */
939         if (IS_GEN9_BC(dev_priv))
940                 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
941                            SKL_SELECT_ALTERNATE_DC_EXIT);
942
943         gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
944 }
945
946 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
947 {
948         WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
949                   "Backlight is not disabled.\n");
950         WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
951                   "DC6 already programmed to be enabled.\n");
952
953         assert_csr_loaded(dev_priv);
954 }
955
956 static void skl_enable_dc6(struct drm_i915_private *dev_priv)
957 {
958         assert_can_enable_dc6(dev_priv);
959
960         DRM_DEBUG_KMS("Enabling DC6\n");
961
962         /* Wa Display #1183: skl,kbl,cfl */
963         if (IS_GEN9_BC(dev_priv))
964                 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
965                            SKL_SELECT_ALTERNATE_DC_EXIT);
966
967         gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
968 }
969
970 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
971                                    struct i915_power_well *power_well)
972 {
973         const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
974         int pw_idx = power_well->desc->hsw.idx;
975         u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
976         u32 bios_req = I915_READ(regs->bios);
977
978         /* Take over the request bit if set by BIOS. */
979         if (bios_req & mask) {
980                 u32 drv_req = I915_READ(regs->driver);
981
982                 if (!(drv_req & mask))
983                         I915_WRITE(regs->driver, drv_req | mask);
984                 I915_WRITE(regs->bios, bios_req & ~mask);
985         }
986 }
987
988 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
989                                            struct i915_power_well *power_well)
990 {
991         bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
992 }
993
994 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
995                                             struct i915_power_well *power_well)
996 {
997         bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
998 }
999
1000 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
1001                                             struct i915_power_well *power_well)
1002 {
1003         return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
1004 }
1005
1006 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
1007 {
1008         struct i915_power_well *power_well;
1009
1010         power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
1011         if (power_well->count > 0)
1012                 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1013
1014         power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1015         if (power_well->count > 0)
1016                 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1017
1018         if (IS_GEMINILAKE(dev_priv)) {
1019                 power_well = lookup_power_well(dev_priv,
1020                                                GLK_DISP_PW_DPIO_CMN_C);
1021                 if (power_well->count > 0)
1022                         bxt_ddi_phy_verify_state(dev_priv,
1023                                                  power_well->desc->bxt.phy);
1024         }
1025 }
1026
1027 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
1028                                            struct i915_power_well *power_well)
1029 {
1030         return ((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
1031                 (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
1032 }
1033
1034 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
1035 {
1036         u32 tmp = I915_READ(DBUF_CTL);
1037
1038         WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
1039              (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
1040              "Unexpected DBuf power state (0x%08x)\n", tmp);
1041 }
1042
1043 static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
1044 {
1045         struct intel_cdclk_state cdclk_state = {};
1046
1047         if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
1048                 tgl_disable_dc3co(dev_priv);
1049                 return;
1050         }
1051
1052         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1053
1054         dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
1055         /* Can't read out voltage_level so can't use intel_cdclk_changed() */
1056         WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));
1057
1058         gen9_assert_dbuf_enabled(dev_priv);
1059
1060         if (IS_GEN9_LP(dev_priv))
1061                 bxt_verify_ddi_phy_power_wells(dev_priv);
1062
1063         if (INTEL_GEN(dev_priv) >= 11)
1064                 /*
1065                  * DMC retains HW context only for port A, the other combo
1066                  * PHY's HW context for port B is lost after DC transitions,
1067                  * so we need to restore it manually.
1068                  */
1069                 intel_combo_phy_init(dev_priv);
1070 }
1071
1072 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
1073                                           struct i915_power_well *power_well)
1074 {
1075         gen9_disable_dc_states(dev_priv);
1076 }
1077
1078 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
1079                                            struct i915_power_well *power_well)
1080 {
1081         if (!dev_priv->csr.dmc_payload)
1082                 return;
1083
1084         switch (dev_priv->csr.target_dc_state) {
1085         case DC_STATE_EN_DC3CO:
1086                 tgl_enable_dc3co(dev_priv);
1087                 break;
1088         case DC_STATE_EN_UPTO_DC6:
1089                 skl_enable_dc6(dev_priv);
1090                 break;
1091         case DC_STATE_EN_UPTO_DC5:
1092                 gen9_enable_dc5(dev_priv);
1093                 break;
1094         }
1095 }
1096
1097 static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
1098                                          struct i915_power_well *power_well)
1099 {
1100 }
1101
1102 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
1103                                            struct i915_power_well *power_well)
1104 {
1105 }
1106
1107 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
1108                                              struct i915_power_well *power_well)
1109 {
1110         return true;
1111 }
1112
1113 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
1114                                          struct i915_power_well *power_well)
1115 {
1116         if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
1117                 i830_enable_pipe(dev_priv, PIPE_A);
1118         if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
1119                 i830_enable_pipe(dev_priv, PIPE_B);
1120 }
1121
1122 static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
1123                                           struct i915_power_well *power_well)
1124 {
1125         i830_disable_pipe(dev_priv, PIPE_B);
1126         i830_disable_pipe(dev_priv, PIPE_A);
1127 }
1128
1129 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1130                                           struct i915_power_well *power_well)
1131 {
1132         return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1133                 I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
1134 }
1135
1136 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
1137                                           struct i915_power_well *power_well)
1138 {
1139         if (power_well->count > 0)
1140                 i830_pipes_power_well_enable(dev_priv, power_well);
1141         else
1142                 i830_pipes_power_well_disable(dev_priv, power_well);
1143 }
1144
1145 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
1146                                struct i915_power_well *power_well, bool enable)
1147 {
1148         int pw_idx = power_well->desc->vlv.idx;
1149         u32 mask;
1150         u32 state;
1151         u32 ctrl;
1152
1153         mask = PUNIT_PWRGT_MASK(pw_idx);
1154         state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
1155                          PUNIT_PWRGT_PWR_GATE(pw_idx);
1156
1157         vlv_punit_get(dev_priv);
1158
1159 #define COND \
1160         ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
1161
1162         if (COND)
1163                 goto out;
1164
1165         ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
1166         ctrl &= ~mask;
1167         ctrl |= state;
1168         vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
1169
1170         if (wait_for(COND, 100))
1171                 DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1172                           state,
1173                           vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
1174
1175 #undef COND
1176
1177 out:
1178         vlv_punit_put(dev_priv);
1179 }
1180
1181 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
1182                                   struct i915_power_well *power_well)
1183 {
1184         vlv_set_power_well(dev_priv, power_well, true);
1185 }
1186
1187 static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
1188                                    struct i915_power_well *power_well)
1189 {
1190         vlv_set_power_well(dev_priv, power_well, false);
1191 }
1192
1193 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
1194                                    struct i915_power_well *power_well)
1195 {
1196         int pw_idx = power_well->desc->vlv.idx;
1197         bool enabled = false;
1198         u32 mask;
1199         u32 state;
1200         u32 ctrl;
1201
1202         mask = PUNIT_PWRGT_MASK(pw_idx);
1203         ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
1204
1205         vlv_punit_get(dev_priv);
1206
1207         state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
1208         /*
1209          * We only ever set the power-on and power-gate states, anything
1210          * else is unexpected.
1211          */
1212         WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
1213                 state != PUNIT_PWRGT_PWR_GATE(pw_idx));
1214         if (state == ctrl)
1215                 enabled = true;
1216
1217         /*
1218          * A transient state at this point would mean some unexpected party
1219          * is poking at the power controls too.
1220          */
1221         ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
1222         WARN_ON(ctrl != state);
1223
1224         vlv_punit_put(dev_priv);
1225
1226         return enabled;
1227 }
1228
1229 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
1230 {
1231         u32 val;
1232
1233         /*
1234          * On driver load, a pipe may be active and driving a DSI display.
1235          * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1236          * (and never recovering) in this case. intel_dsi_post_disable() will
1237          * clear it when we turn off the display.
1238          */
1239         val = I915_READ(DSPCLK_GATE_D);
1240         val &= DPOUNIT_CLOCK_GATE_DISABLE;
1241         val |= VRHUNIT_CLOCK_GATE_DISABLE;
1242         I915_WRITE(DSPCLK_GATE_D, val);
1243
1244         /*
1245          * Disable trickle feed and enable pnd deadline calculation
1246          */
1247         I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1248         I915_WRITE(CBR1_VLV, 0);
1249
1250         WARN_ON(dev_priv->rawclk_freq == 0);
1251
1252         I915_WRITE(RAWCLK_FREQ_VLV,
1253                    DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
1254 }
1255
1256 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1257 {
1258         struct intel_encoder *encoder;
1259         enum pipe pipe;
1260
1261         /*
1262          * Enable the CRI clock source so we can get at the
1263          * display and the reference clock for VGA
1264          * hotplug / manual detection. Supposedly DSI also
1265          * needs the ref clock up and running.
1266          *
1267          * CHV DPLL B/C have some issues if VGA mode is enabled.
1268          */
1269         for_each_pipe(dev_priv, pipe) {
1270                 u32 val = I915_READ(DPLL(pipe));
1271
1272                 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1273                 if (pipe != PIPE_A)
1274                         val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1275
1276                 I915_WRITE(DPLL(pipe), val);
1277         }
1278
1279         vlv_init_display_clock_gating(dev_priv);
1280
1281         spin_lock_irq(&dev_priv->irq_lock);
1282         valleyview_enable_display_irqs(dev_priv);
1283         spin_unlock_irq(&dev_priv->irq_lock);
1284
1285         /*
1286          * During driver initialization/resume we can avoid restoring the
1287          * part of the HW/SW state that will be explicitly initialized anyway.
1288          */
1289         if (dev_priv->power_domains.initializing)
1290                 return;
1291
1292         intel_hpd_init(dev_priv);
1293
1294         /* Re-enable the ADPA, if we have one */
1295         for_each_intel_encoder(&dev_priv->drm, encoder) {
1296                 if (encoder->type == INTEL_OUTPUT_ANALOG)
1297                         intel_crt_reset(&encoder->base);
1298         }
1299
1300         intel_vga_redisable_power_on(dev_priv);
1301
1302         intel_pps_unlock_regs_wa(dev_priv);
1303 }
1304
1305 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1306 {
1307         spin_lock_irq(&dev_priv->irq_lock);
1308         valleyview_disable_display_irqs(dev_priv);
1309         spin_unlock_irq(&dev_priv->irq_lock);
1310
1311         /* make sure we're done processing display irqs */
1312         intel_synchronize_irq(dev_priv);
1313
1314         intel_power_sequencer_reset(dev_priv);
1315
1316         /* Prevent us from re-enabling polling by accident in late suspend */
1317         if (!dev_priv->drm.dev->power.is_suspended)
1318                 intel_hpd_poll_init(dev_priv);
1319 }
1320
1321 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1322                                           struct i915_power_well *power_well)
1323 {
1324         vlv_set_power_well(dev_priv, power_well, true);
1325
1326         vlv_display_power_well_init(dev_priv);
1327 }
1328
1329 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1330                                            struct i915_power_well *power_well)
1331 {
1332         vlv_display_power_well_deinit(dev_priv);
1333
1334         vlv_set_power_well(dev_priv, power_well, false);
1335 }
1336
1337 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1338                                            struct i915_power_well *power_well)
1339 {
1340         /* since ref/cri clock was enabled */
1341         udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1342
1343         vlv_set_power_well(dev_priv, power_well, true);
1344
1345         /*
1346          * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1347          *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
1348          *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
1349          *   b. The other bits such as sfr settings / modesel may all
1350          *      be set to 0.
1351          *
1352          * This should only be done on init and resume from S3 with
1353          * both PLLs disabled, or we risk losing DPIO and PLL
1354          * synchronization.
1355          */
1356         I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
1357 }
1358
1359 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1360                                             struct i915_power_well *power_well)
1361 {
1362         enum pipe pipe;
1363
1364         for_each_pipe(dev_priv, pipe)
1365                 assert_pll_disabled(dev_priv, pipe);
1366
1367         /* Assert common reset */
1368         I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
1369
1370         vlv_set_power_well(dev_priv, power_well, false);
1371 }
1372
1373 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1374
1375 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1376
1377 static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1378 {
1379         struct i915_power_well *cmn_bc =
1380                 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1381         struct i915_power_well *cmn_d =
1382                 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1383         u32 phy_control = dev_priv->chv_phy_control;
1384         u32 phy_status = 0;
1385         u32 phy_status_mask = 0xffffffff;
1386
1387         /*
1388          * The BIOS can leave the PHY in some weird state
1389          * where it doesn't fully power down some parts.
1390          * Disable the asserts until the PHY has been fully
1391          * reset (ie. the power well has been disabled at
1392          * least once).
1393          */
1394         if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1395                 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1396                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1397                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1398                                      PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1399                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1400                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1401
1402         if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1403                 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1404                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1405                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1406
1407         if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
1408                 phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1409
1410                 /* this assumes override is only used to enable lanes */
1411                 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1412                         phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1413
1414                 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1415                         phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1416
1417                 /* CL1 is on whenever anything is on in either channel */
1418                 if (BITS_SET(phy_control,
1419                              PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1420                              PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1421                         phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1422
1423                 /*
1424                  * The DPLLB check accounts for the pipe B + port A usage
1425                  * with CL2 powered up but all the lanes in the second channel
1426                  * powered down.
1427                  */
1428                 if (BITS_SET(phy_control,
1429                              PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1430                     (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1431                         phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
1432
1433                 if (BITS_SET(phy_control,
1434                              PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1435                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1436                 if (BITS_SET(phy_control,
1437                              PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1438                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1439
1440                 if (BITS_SET(phy_control,
1441                              PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1442                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1443                 if (BITS_SET(phy_control,
1444                              PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1445                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1446         }
1447
1448         if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
1449                 phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1450
1451                 /* this assumes override is only used to enable lanes */
1452                 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1453                         phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1454
1455                 if (BITS_SET(phy_control,
1456                              PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1457                         phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1458
1459                 if (BITS_SET(phy_control,
1460                              PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1461                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1462                 if (BITS_SET(phy_control,
1463                              PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1464                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1465         }
1466
1467         phy_status &= phy_status_mask;
1468
1469         /*
1470          * The PHY may be busy with some initial calibration and whatnot,
1471          * so the power state can take a while to actually change.
1472          */
1473         if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
1474                                        phy_status_mask, phy_status, 10))
1475                 DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1476                           I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
1477                            phy_status, dev_priv->chv_phy_control);
1478 }
1479
1480 #undef BITS_SET
1481
1482 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1483                                            struct i915_power_well *power_well)
1484 {
1485         enum dpio_phy phy;
1486         enum pipe pipe;
1487         u32 tmp;
1488
1489         WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1490                      power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1491
1492         if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1493                 pipe = PIPE_A;
1494                 phy = DPIO_PHY0;
1495         } else {
1496                 pipe = PIPE_C;
1497                 phy = DPIO_PHY1;
1498         }
1499
1500         /* since ref/cri clock was enabled */
1501         udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1502         vlv_set_power_well(dev_priv, power_well, true);
1503
1504         /* Poll for phypwrgood signal */
1505         if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
1506                                   PHY_POWERGOOD(phy), 1))
1507                 DRM_ERROR("Display PHY %d is not powered up\n", phy);
1508
1509         vlv_dpio_get(dev_priv);
1510
1511         /* Enable dynamic power down */
1512         tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1513         tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1514                 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1515         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1516
1517         if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1518                 tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1519                 tmp |= DPIO_DYNPWRDOWNEN_CH1;
1520                 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1521         } else {
1522                 /*
1523                  * Force the non-existent CL2 off. BXT does this
1524                  * too, so maybe it saves some power even though
1525                  * CL2 doesn't exist?
1526                  */
1527                 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1528                 tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1529                 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1530         }
1531
1532         vlv_dpio_put(dev_priv);
1533
1534         dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1535         I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1536
1537         DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1538                       phy, dev_priv->chv_phy_control);
1539
1540         assert_chv_phy_status(dev_priv);
1541 }
1542
1543 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1544                                             struct i915_power_well *power_well)
1545 {
1546         enum dpio_phy phy;
1547
1548         WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1549                      power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1550
1551         if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1552                 phy = DPIO_PHY0;
1553                 assert_pll_disabled(dev_priv, PIPE_A);
1554                 assert_pll_disabled(dev_priv, PIPE_B);
1555         } else {
1556                 phy = DPIO_PHY1;
1557                 assert_pll_disabled(dev_priv, PIPE_C);
1558         }
1559
1560         dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1561         I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1562
1563         vlv_set_power_well(dev_priv, power_well, false);
1564
1565         DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1566                       phy, dev_priv->chv_phy_control);
1567
1568         /* PHY is fully reset now, so we can enable the PHY state asserts */
1569         dev_priv->chv_phy_assert[phy] = true;
1570
1571         assert_chv_phy_status(dev_priv);
1572 }
1573
1574 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1575                                      enum dpio_channel ch, bool override, unsigned int mask)
1576 {
1577         enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1578         u32 reg, val, expected, actual;
1579
1580         /*
1581          * The BIOS can leave the PHY in some weird state
1582          * where it doesn't fully power down some parts.
1583          * Disable the asserts until the PHY has been fully
1584          * reset (i.e. the power well has been disabled at
1585          * least once).
1586          */
1587         if (!dev_priv->chv_phy_assert[phy])
1588                 return;
1589
1590         if (ch == DPIO_CH0)
1591                 reg = _CHV_CMN_DW0_CH0;
1592         else
1593                 reg = _CHV_CMN_DW6_CH1;
1594
1595         vlv_dpio_get(dev_priv);
1596         val = vlv_dpio_read(dev_priv, pipe, reg);
1597         vlv_dpio_put(dev_priv);
1598
1599         /*
1600          * This assumes !override is only used when the port is disabled.
1601          * All lanes should power down even without the override when
1602          * the port is disabled.
1603          */
1604         if (!override || mask == 0xf) {
1605                 expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1606                 /*
1607                  * If CH1 common lane is not active anymore
1608                  * (e.g. for pipe B DPLL) the entire channel will
1609                  * shut down, which causes the common lane registers
1610                  * to read as 0. That means we can't actually check
1611                  * the lane power down status bits, but as the entire
1612                  * register reads as 0 it's a good indication that the
1613                  * channel is indeed entirely powered down.
1614                  */
1615                 if (ch == DPIO_CH1 && val == 0)
1616                         expected = 0;
1617         } else if (mask != 0x0) {
1618                 expected = DPIO_ANYDL_POWERDOWN;
1619         } else {
1620                 expected = 0;
1621         }
1622
1623         if (ch == DPIO_CH0)
1624                 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1625         else
1626                 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1627         actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1628
1629         WARN(actual != expected,
1630              "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1631              !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
1632              !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
1633              reg, val);
1634 }
1635
1636 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1637                           enum dpio_channel ch, bool override)
1638 {
1639         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1640         bool was_override;
1641
1642         mutex_lock(&power_domains->lock);
1643
1644         was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1645
1646         if (override == was_override)
1647                 goto out;
1648
1649         if (override)
1650                 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1651         else
1652                 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1653
1654         I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1655
1656         DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1657                       phy, ch, dev_priv->chv_phy_control);
1658
1659         assert_chv_phy_status(dev_priv);
1660
1661 out:
1662         mutex_unlock(&power_domains->lock);
1663
1664         return was_override;
1665 }
1666
1667 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1668                              bool override, unsigned int mask)
1669 {
1670         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1671         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1672         enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
1673         enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1674
1675         mutex_lock(&power_domains->lock);
1676
1677         dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1678         dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1679
1680         if (override)
1681                 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1682         else
1683                 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1684
1685         I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1686
1687         DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1688                       phy, ch, mask, dev_priv->chv_phy_control);
1689
1690         assert_chv_phy_status(dev_priv);
1691
1692         assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1693
1694         mutex_unlock(&power_domains->lock);
1695 }
1696
1697 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1698                                         struct i915_power_well *power_well)
1699 {
1700         enum pipe pipe = PIPE_A;
1701         bool enabled;
1702         u32 state, ctrl;
1703
1704         vlv_punit_get(dev_priv);
1705
1706         state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
1707         /*
1708          * We only ever set the power-on and power-gate states; anything
1709          * else is unexpected.
1710          */
1711         WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
1712         enabled = state == DP_SSS_PWR_ON(pipe);
1713
1714         /*
1715          * A transient state at this point would mean some unexpected party
1716          * is poking at the power controls too.
1717          */
1718         ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
1719         WARN_ON(ctrl << 16 != state);
1720
1721         vlv_punit_put(dev_priv);
1722
1723         return enabled;
1724 }
1725
1726 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1727                                     struct i915_power_well *power_well,
1728                                     bool enable)
1729 {
1730         enum pipe pipe = PIPE_A;
1731         u32 state;
1732         u32 ctrl;
1733
1734         state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1735
1736         vlv_punit_get(dev_priv);
1737
1738 #define COND \
1739         ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
1740
1741         if (COND)
1742                 goto out;
1743
1744         ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
1745         ctrl &= ~DP_SSC_MASK(pipe);
1746         ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1747         vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
1748
1749         if (wait_for(COND, 100))
1750                 DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1751                           state,
1752                           vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
1753
1754 #undef COND
1755
1756 out:
1757         vlv_punit_put(dev_priv);
1758 }
1759
1760 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1761                                        struct i915_power_well *power_well)
1762 {
1763         chv_set_pipe_power_well(dev_priv, power_well, true);
1764
1765         vlv_display_power_well_init(dev_priv);
1766 }
1767
1768 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1769                                         struct i915_power_well *power_well)
1770 {
1771         vlv_display_power_well_deinit(dev_priv);
1772
1773         chv_set_pipe_power_well(dev_priv, power_well, false);
1774 }
1775
1776 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
1777 {
1778         return power_domains->async_put_domains[0] |
1779                power_domains->async_put_domains[1];
1780 }
1781
1782 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1783
1784 static bool
1785 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1786 {
1787         return !WARN_ON(power_domains->async_put_domains[0] &
1788                         power_domains->async_put_domains[1]);
1789 }
1790
1791 static bool
1792 __async_put_domains_state_ok(struct i915_power_domains *power_domains)
1793 {
1794         enum intel_display_power_domain domain;
1795         bool err = false;
1796
1797         err |= !assert_async_put_domain_masks_disjoint(power_domains);
1798         err |= WARN_ON(!!power_domains->async_put_wakeref !=
1799                        !!__async_put_domains_mask(power_domains));
1800
1801         for_each_power_domain(domain, __async_put_domains_mask(power_domains))
1802                 err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
1803
1804         return !err;
1805 }
1806
1807 static void print_power_domains(struct i915_power_domains *power_domains,
1808                                 const char *prefix, u64 mask)
1809 {
1810         enum intel_display_power_domain domain;
1811
1812         DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
1813         for_each_power_domain(domain, mask)
1814                 DRM_DEBUG_DRIVER("%s use_count %d\n",
1815                                  intel_display_power_domain_str(domain),
1816                                  power_domains->domain_use_count[domain]);
1817 }
1818
1819 static void
1820 print_async_put_domains_state(struct i915_power_domains *power_domains)
1821 {
1822         DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
1823                          power_domains->async_put_wakeref);
1824
1825         print_power_domains(power_domains, "async_put_domains[0]",
1826                             power_domains->async_put_domains[0]);
1827         print_power_domains(power_domains, "async_put_domains[1]",
1828                             power_domains->async_put_domains[1]);
1829 }
1830
1831 static void
1832 verify_async_put_domains_state(struct i915_power_domains *power_domains)
1833 {
1834         if (!__async_put_domains_state_ok(power_domains))
1835                 print_async_put_domains_state(power_domains);
1836 }
1837
1838 #else
1839
1840 static void
1841 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1842 {
1843 }
1844
1845 static void
1846 verify_async_put_domains_state(struct i915_power_domains *power_domains)
1847 {
1848 }
1849
1850 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
1851
1852 static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
1853 {
1854         assert_async_put_domain_masks_disjoint(power_domains);
1855
1856         return __async_put_domains_mask(power_domains);
1857 }
1858
1859 static void
1860 async_put_domains_clear_domain(struct i915_power_domains *power_domains,
1861                                enum intel_display_power_domain domain)
1862 {
1863         assert_async_put_domain_masks_disjoint(power_domains);
1864
1865         power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
1866         power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
1867 }
1868
1869 static bool
1870 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
1871                                        enum intel_display_power_domain domain)
1872 {
1873         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1874         bool ret = false;
1875
1876         if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
1877                 goto out_verify;
1878
1879         async_put_domains_clear_domain(power_domains, domain);
1880
1881         ret = true;
1882
1883         if (async_put_domains_mask(power_domains))
1884                 goto out_verify;
1885
1886         cancel_delayed_work(&power_domains->async_put_work);
1887         intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
1888                                  fetch_and_zero(&power_domains->async_put_wakeref));
1889 out_verify:
1890         verify_async_put_domains_state(power_domains);
1891
1892         return ret;
1893 }
1894
1895 static void
1896 __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1897                                  enum intel_display_power_domain domain)
1898 {
1899         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1900         struct i915_power_well *power_well;
1901
1902         if (intel_display_power_grab_async_put_ref(dev_priv, domain))
1903                 return;
1904
1905         for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
1906                 intel_power_well_get(dev_priv, power_well);
1907
1908         power_domains->domain_use_count[domain]++;
1909 }
1910
1911 /**
1912  * intel_display_power_get - grab a power domain reference
1913  * @dev_priv: i915 device instance
1914  * @domain: power domain to reference
1915  *
1916  * This function grabs a power domain reference for @domain and ensures that the
1917  * power domain and all its parents are powered up. Therefore users should only
1918  * grab a reference to the innermost power domain they need.
1919  *
1920  * Any power domain reference obtained by this function must have a symmetric
1921  * call to intel_display_power_put() to release the reference again.
1922  */
1923 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
1924                                         enum intel_display_power_domain domain)
1925 {
1926         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1927         intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1928
1929         mutex_lock(&power_domains->lock);
1930         __intel_display_power_get_domain(dev_priv, domain);
1931         mutex_unlock(&power_domains->lock);
1932
1933         return wakeref;
1934 }
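
/*
 * An illustrative sketch of the expected calling pattern; POWER_DOMAIN_PIPE_A
 * and dev_priv stand in for whatever domain and device pointer the caller
 * actually has. Grab the innermost domain needed, do the HW access, then
 * release the reference with the returned wakeref:
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	... program pipe A registers ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 */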
1935
1936 /**
1937  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1938  * @dev_priv: i915 device instance
1939  * @domain: power domain to reference
1940  *
1941  * This function grabs a power domain reference for @domain and ensures that the
1942  * power domain and all its parents are powered up. Therefore users should only
1943  * grab a reference to the innermost power domain they need.
1944  *
1945  * Any power domain reference obtained by this function must have a symmetric
1946  * call to intel_display_power_put() to release the reference again.
1947  */
1948 intel_wakeref_t
1949 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1950                                    enum intel_display_power_domain domain)
1951 {
1952         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1953         intel_wakeref_t wakeref;
1954         bool is_enabled;
1955
1956         wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
1957         if (!wakeref)
1958                 return false;
1959
1960         mutex_lock(&power_domains->lock);
1961
1962         if (__intel_display_power_is_enabled(dev_priv, domain)) {
1963                 __intel_display_power_get_domain(dev_priv, domain);
1964                 is_enabled = true;
1965         } else {
1966                 is_enabled = false;
1967         }
1968
1969         mutex_unlock(&power_domains->lock);
1970
1971         if (!is_enabled) {
1972                 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1973                 wakeref = 0;
1974         }
1975
1976         return wakeref;
1977 }
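
/*
 * An illustrative sketch of the conditional pattern this function enables; the
 * returned wakeref doubles as the "was it enabled" result and must only be
 * released when it is non-zero (POWER_DOMAIN_PIPE_A is an arbitrary example
 * domain):
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PIPE_A);
 *	if (!wakeref)
 *		return;
 *	... read out pipe A state ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 */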
1978
1979 static void
1980 __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
1981                                  enum intel_display_power_domain domain)
1982 {
1983         struct i915_power_domains *power_domains;
1984         struct i915_power_well *power_well;
1985         const char *name = intel_display_power_domain_str(domain);
1986
1987         power_domains = &dev_priv->power_domains;
1988
1989         WARN(!power_domains->domain_use_count[domain],
1990              "Use count on domain %s is already zero\n",
1991              name);
1992         WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain),
1993              "Async disabling of domain %s is pending\n",
1994              name);
1995
1996         power_domains->domain_use_count[domain]--;
1997
1998         for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
1999                 intel_power_well_put(dev_priv, power_well);
2000 }
2001
2002 static void __intel_display_power_put(struct drm_i915_private *dev_priv,
2003                                       enum intel_display_power_domain domain)
2004 {
2005         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2006
2007         mutex_lock(&power_domains->lock);
2008         __intel_display_power_put_domain(dev_priv, domain);
2009         mutex_unlock(&power_domains->lock);
2010 }
2011
2012 /**
2013  * intel_display_power_put_unchecked - release an unchecked power domain reference
2014  * @dev_priv: i915 device instance
2015  * @domain: power domain to put the reference for
2016  *
2017  * This function drops the power domain reference obtained by
2018  * intel_display_power_get() and might power down the corresponding hardware
2019  * block right away if this is the last reference.
2020  *
2021  * This function exists only for historical reasons and should be avoided in
2022  * new code, as the correctness of its use cannot be checked. Always use
2023  * intel_display_power_put() instead.
2024  */
2025 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
2026                                        enum intel_display_power_domain domain)
2027 {
2028         __intel_display_power_put(dev_priv, domain);
2029         intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
2030 }
2031
2032 static void
2033 queue_async_put_domains_work(struct i915_power_domains *power_domains,
2034                              intel_wakeref_t wakeref)
2035 {
2036         WARN_ON(power_domains->async_put_wakeref);
2037         power_domains->async_put_wakeref = wakeref;
2038         WARN_ON(!queue_delayed_work(system_unbound_wq,
2039                                     &power_domains->async_put_work,
2040                                     msecs_to_jiffies(100)));
2041 }
2042
2043 static void
2044 release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
2045 {
2046         struct drm_i915_private *dev_priv =
2047                 container_of(power_domains, struct drm_i915_private,
2048                              power_domains);
2049         struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2050         enum intel_display_power_domain domain;
2051         intel_wakeref_t wakeref;
2052
2053         /*
2054          * The caller must already hold a raw wakeref; upgrade that to a proper
2055          * wakeref to make the state checker happy about the HW access during
2056          * power well disabling.
2057          */
2058         assert_rpm_raw_wakeref_held(rpm);
2059         wakeref = intel_runtime_pm_get(rpm);
2060
2061         for_each_power_domain(domain, mask) {
2062                 /* Clear before put, so put's sanity check is happy. */
2063                 async_put_domains_clear_domain(power_domains, domain);
2064                 __intel_display_power_put_domain(dev_priv, domain);
2065         }
2066
2067         intel_runtime_pm_put(rpm, wakeref);
2068 }
2069
2070 static void
2071 intel_display_power_put_async_work(struct work_struct *work)
2072 {
2073         struct drm_i915_private *dev_priv =
2074                 container_of(work, struct drm_i915_private,
2075                              power_domains.async_put_work.work);
2076         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2077         struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2078         intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
2079         intel_wakeref_t old_work_wakeref = 0;
2080
2081         mutex_lock(&power_domains->lock);
2082
2083         /*
2084          * Bail out if all the domain refs pending to be released were grabbed
2085          * by subsequent gets or a flush_work.
2086          */
2087         old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2088         if (!old_work_wakeref)
2089                 goto out_verify;
2090
2091         release_async_put_domains(power_domains,
2092                                   power_domains->async_put_domains[0]);
2093
2094         /* Requeue the work if more domains were async put meanwhile. */
2095         if (power_domains->async_put_domains[1]) {
2096                 power_domains->async_put_domains[0] =
2097                         fetch_and_zero(&power_domains->async_put_domains[1]);
2098                 queue_async_put_domains_work(power_domains,
2099                                              fetch_and_zero(&new_work_wakeref));
2100         }
2101
2102 out_verify:
2103         verify_async_put_domains_state(power_domains);
2104
2105         mutex_unlock(&power_domains->lock);
2106
2107         if (old_work_wakeref)
2108                 intel_runtime_pm_put_raw(rpm, old_work_wakeref);
2109         if (new_work_wakeref)
2110                 intel_runtime_pm_put_raw(rpm, new_work_wakeref);
2111 }
2112
2113 /**
2114  * intel_display_power_put_async - release a power domain reference asynchronously
2115  * @i915: i915 device instance
2116  * @domain: power domain to put the reference for
2117  * @wakeref: wakeref acquired for the reference that is being released
2118  *
2119  * This function drops the power domain reference obtained by
2120  * intel_display_power_get*() and schedules a work to power down the
2121  * corresponding hardware block if this is the last reference.
2122  */
2123 void __intel_display_power_put_async(struct drm_i915_private *i915,
2124                                      enum intel_display_power_domain domain,
2125                                      intel_wakeref_t wakeref)
2126 {
2127         struct i915_power_domains *power_domains = &i915->power_domains;
2128         struct intel_runtime_pm *rpm = &i915->runtime_pm;
2129         intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
2130
2131         mutex_lock(&power_domains->lock);
2132
2133         if (power_domains->domain_use_count[domain] > 1) {
2134                 __intel_display_power_put_domain(i915, domain);
2135
2136                 goto out_verify;
2137         }
2138
2139         WARN_ON(power_domains->domain_use_count[domain] != 1);
2140
2141         /* Let a pending work requeue itself or queue a new one. */
2142         if (power_domains->async_put_wakeref) {
2143                 power_domains->async_put_domains[1] |= BIT_ULL(domain);
2144         } else {
2145                 power_domains->async_put_domains[0] |= BIT_ULL(domain);
2146                 queue_async_put_domains_work(power_domains,
2147                                              fetch_and_zero(&work_wakeref));
2148         }
2149
2150 out_verify:
2151         verify_async_put_domains_state(power_domains);
2152
2153         mutex_unlock(&power_domains->lock);
2154
2155         if (work_wakeref)
2156                 intel_runtime_pm_put_raw(rpm, work_wakeref);
2157
2158         intel_runtime_pm_put(rpm, wakeref);
2159 }
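
/*
 * An illustrative sketch, assuming the caller goes through the
 * intel_display_power_put_async() wrapper declared in intel_display_power.h
 * and holds a wakeref for POWER_DOMAIN_AUX_A (both are example choices):
 *
 *	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);
 *
 * The reference is dropped right away, but if it was the last one the actual
 * power well disabling happens from the delayed work queued above (roughly
 * 100 ms later), so closely spaced users do not toggle the well off and on.
 */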
2160
2161 /**
2162  * intel_display_power_flush_work - flushes the async display power disabling work
2163  * @i915: i915 device instance
2164  *
2165  * Flushes any pending work that was scheduled by a preceding
2166  * intel_display_power_put_async() call, completing the disabling of the
2167  * corresponding power domains.
2168  *
2169  * Note that the work handler function may still be running after this
2170  * function returns; to ensure that the work handler isn't running use
2171  * intel_display_power_flush_work_sync() instead.
2172  */
2173 void intel_display_power_flush_work(struct drm_i915_private *i915)
2174 {
2175         struct i915_power_domains *power_domains = &i915->power_domains;
2176         intel_wakeref_t work_wakeref;
2177
2178         mutex_lock(&power_domains->lock);
2179
2180         work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2181         if (!work_wakeref)
2182                 goto out_verify;
2183
2184         release_async_put_domains(power_domains,
2185                                   async_put_domains_mask(power_domains));
2186         cancel_delayed_work(&power_domains->async_put_work);
2187
2188 out_verify:
2189         verify_async_put_domains_state(power_domains);
2190
2191         mutex_unlock(&power_domains->lock);
2192
2193         if (work_wakeref)
2194                 intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
2195 }
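
/*
 * A sketch of pairing the async put with a flush on a teardown path, so the
 * deferred power down is guaranteed to have been issued before continuing
 * (domain and wakeref are placeholders for whatever the caller used):
 *
 *	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);
 *	...
 *	intel_display_power_flush_work(i915);
 *
 * Note that, as documented above, the work handler may still be executing
 * when the flush returns; intel_display_power_flush_work_sync() below also
 * waits for the handler itself to finish.
 */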
2196
2197 /**
2198  * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
2199  * @i915: i915 device instance
2200  *
2201  * Like intel_display_power_flush_work(), but also ensure that the work
2202  * handler function is not running any more when this function returns.
2203  */
2204 static void
2205 intel_display_power_flush_work_sync(struct drm_i915_private *i915)
2206 {
2207         struct i915_power_domains *power_domains = &i915->power_domains;
2208
2209         intel_display_power_flush_work(i915);
2210         cancel_delayed_work_sync(&power_domains->async_put_work);
2211
2212         verify_async_put_domains_state(power_domains);
2213
2214         WARN_ON(power_domains->async_put_wakeref);
2215 }
2216
2217 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2218 /**
2219  * intel_display_power_put - release a power domain reference
2220  * @dev_priv: i915 device instance
2221  * @domain: power domain to put the reference for
2222  * @wakeref: wakeref acquired for the reference that is being released
2223  *
2224  * This function drops the power domain reference obtained by
2225  * intel_display_power_get() and might power down the corresponding hardware
2226  * block right away if this is the last reference.
2227  */
2228 void intel_display_power_put(struct drm_i915_private *dev_priv,
2229                              enum intel_display_power_domain domain,
2230                              intel_wakeref_t wakeref)
2231 {
2232         __intel_display_power_put(dev_priv, domain);
2233         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2234 }
2235 #endif
2236
2237 #define I830_PIPES_POWER_DOMAINS (              \
2238         BIT_ULL(POWER_DOMAIN_PIPE_A) |          \
2239         BIT_ULL(POWER_DOMAIN_PIPE_B) |          \
2240         BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |     \
2241         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
2242         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |    \
2243         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |    \
2244         BIT_ULL(POWER_DOMAIN_INIT))
2245
2246 #define VLV_DISPLAY_POWER_DOMAINS (             \
2247         BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |    \
2248         BIT_ULL(POWER_DOMAIN_PIPE_A) |          \
2249         BIT_ULL(POWER_DOMAIN_PIPE_B) |          \
2250         BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |     \
2251         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
2252         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |    \
2253         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |    \
2254         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
2255         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2256         BIT_ULL(POWER_DOMAIN_PORT_DSI) |                \
2257         BIT_ULL(POWER_DOMAIN_PORT_CRT) |                \
2258         BIT_ULL(POWER_DOMAIN_VGA) |                     \
2259         BIT_ULL(POWER_DOMAIN_AUDIO) |           \
2260         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
2261         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
2262         BIT_ULL(POWER_DOMAIN_GMBUS) |           \
2263         BIT_ULL(POWER_DOMAIN_INIT))
2264
2265 #define VLV_DPIO_CMN_BC_POWER_DOMAINS (         \
2266         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
2267         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2268         BIT_ULL(POWER_DOMAIN_PORT_CRT) |                \
2269         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
2270         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
2271         BIT_ULL(POWER_DOMAIN_INIT))
2272
2273 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (  \
2274         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
2275         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
2276         BIT_ULL(POWER_DOMAIN_INIT))
2277
2278 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (  \
2279         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
2280         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
2281         BIT_ULL(POWER_DOMAIN_INIT))
2282
2283 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (  \
2284         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2285         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
2286         BIT_ULL(POWER_DOMAIN_INIT))
2287
2288 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (  \
2289         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2290         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
2291         BIT_ULL(POWER_DOMAIN_INIT))
2292
2293 #define CHV_DISPLAY_POWER_DOMAINS (             \
2294         BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |    \
2295         BIT_ULL(POWER_DOMAIN_PIPE_A) |          \
2296         BIT_ULL(POWER_DOMAIN_PIPE_B) |          \
2297         BIT_ULL(POWER_DOMAIN_PIPE_C) |          \
2298         BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |     \
2299         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
2300         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |     \
2301         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |    \
2302         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |    \
2303         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |    \
2304         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
2305         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2306         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
2307         BIT_ULL(POWER_DOMAIN_PORT_DSI) |                \
2308         BIT_ULL(POWER_DOMAIN_VGA) |                     \
2309         BIT_ULL(POWER_DOMAIN_AUDIO) |           \
2310         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
2311         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
2312         BIT_ULL(POWER_DOMAIN_AUX_D) |           \
2313         BIT_ULL(POWER_DOMAIN_GMBUS) |           \
2314         BIT_ULL(POWER_DOMAIN_INIT))
2315
2316 #define CHV_DPIO_CMN_BC_POWER_DOMAINS (         \
2317         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
2318         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2319         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
2320         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
2321         BIT_ULL(POWER_DOMAIN_INIT))
2322
2323 #define CHV_DPIO_CMN_D_POWER_DOMAINS (          \
2324         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
2325         BIT_ULL(POWER_DOMAIN_AUX_D) |           \
2326         BIT_ULL(POWER_DOMAIN_INIT))
2327
2328 #define HSW_DISPLAY_POWER_DOMAINS (                     \
2329         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2330         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2331         BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |             \
2332         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2333         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2334         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2335         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2336         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2337         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2338         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2339         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
2340         BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */    \
2341         BIT_ULL(POWER_DOMAIN_VGA) |                             \
2342         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2343         BIT_ULL(POWER_DOMAIN_INIT))
2344
2345 #define BDW_DISPLAY_POWER_DOMAINS (                     \
2346         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2347         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2348         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2349         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2350         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2351         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2352         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2353         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2354         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2355         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
2356         BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */    \
2357         BIT_ULL(POWER_DOMAIN_VGA) |                             \
2358         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2359         BIT_ULL(POWER_DOMAIN_INIT))
2360
2361 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (         \
2362         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2363         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2364         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2365         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2366         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2367         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2368         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2369         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2370         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2371         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
2372         BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |                \
2373         BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2374         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2375         BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
2376         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2377         BIT_ULL(POWER_DOMAIN_VGA) |                             \
2378         BIT_ULL(POWER_DOMAIN_INIT))
2379 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (          \
2380         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |           \
2381         BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |           \
2382         BIT_ULL(POWER_DOMAIN_INIT))
2383 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (            \
2384         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |           \
2385         BIT_ULL(POWER_DOMAIN_INIT))
2386 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (            \
2387         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |           \
2388         BIT_ULL(POWER_DOMAIN_INIT))
2389 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (            \
2390         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |           \
2391         BIT_ULL(POWER_DOMAIN_INIT))
2392 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (              \
2393         SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
2394         BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
2395         BIT_ULL(POWER_DOMAIN_MODESET) |                 \
2396         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2397         BIT_ULL(POWER_DOMAIN_INIT))
2398
2399 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (         \
2400         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2401         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2402         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2403         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2404         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2405         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2406         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2407         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2408         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2409         BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
2410         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2411         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2412         BIT_ULL(POWER_DOMAIN_VGA) |                             \
2413         BIT_ULL(POWER_DOMAIN_INIT))
2414 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (              \
2415         BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
2416         BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
2417         BIT_ULL(POWER_DOMAIN_MODESET) |                 \
2418         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2419         BIT_ULL(POWER_DOMAIN_GMBUS) |                   \
2420         BIT_ULL(POWER_DOMAIN_INIT))
2421 #define BXT_DPIO_CMN_A_POWER_DOMAINS (                  \
2422         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |                \
2423         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2424         BIT_ULL(POWER_DOMAIN_INIT))
2425 #define BXT_DPIO_CMN_BC_POWER_DOMAINS (                 \
2426         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2427         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2428         BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
2429         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2430         BIT_ULL(POWER_DOMAIN_INIT))
2431
2432 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (         \
2433         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2434         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2435         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2436         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2437         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2438         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2439         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2440         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2441         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2442         BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2443         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2444         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2445         BIT_ULL(POWER_DOMAIN_VGA) |                             \
2446         BIT_ULL(POWER_DOMAIN_INIT))
2447 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (            \
2448         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2449 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (            \
2450         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2451 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (            \
2452         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2453 #define GLK_DPIO_CMN_A_POWER_DOMAINS (                  \
2454         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |                \
2455         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2456         BIT_ULL(POWER_DOMAIN_INIT))
2457 #define GLK_DPIO_CMN_B_POWER_DOMAINS (                  \
2458         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2459         BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
2460         BIT_ULL(POWER_DOMAIN_INIT))
2461 #define GLK_DPIO_CMN_C_POWER_DOMAINS (                  \
2462         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2463         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2464         BIT_ULL(POWER_DOMAIN_INIT))
2465 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS (               \
2466         BIT_ULL(POWER_DOMAIN_AUX_A) |           \
2467         BIT_ULL(POWER_DOMAIN_AUX_IO_A) |                \
2468         BIT_ULL(POWER_DOMAIN_INIT))
2469 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS (               \
2470         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
2471         BIT_ULL(POWER_DOMAIN_INIT))
2472 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS (               \
2473         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
2474         BIT_ULL(POWER_DOMAIN_INIT))
2475 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (              \
2476         GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
2477         BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
2478         BIT_ULL(POWER_DOMAIN_MODESET) |                 \
2479         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2480         BIT_ULL(POWER_DOMAIN_GMBUS) |                   \
2481         BIT_ULL(POWER_DOMAIN_INIT))
2482
2483 #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (         \
2484         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2485         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2486         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2487         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2488         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2489         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2490         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2491         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2492         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2493         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
2494         BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |                \
2495         BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2496         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2497         BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
2498         BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
2499         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2500         BIT_ULL(POWER_DOMAIN_VGA) |                             \
2501         BIT_ULL(POWER_DOMAIN_INIT))
2502 #define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (            \
2503         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |           \
2504         BIT_ULL(POWER_DOMAIN_INIT))
2505 #define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (            \
2506         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |           \
2507         BIT_ULL(POWER_DOMAIN_INIT))
2508 #define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (            \
2509         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |           \
2510         BIT_ULL(POWER_DOMAIN_INIT))
2511 #define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (            \
2512         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |           \
2513         BIT_ULL(POWER_DOMAIN_INIT))
2514 #define CNL_DISPLAY_AUX_A_POWER_DOMAINS (               \
2515         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2516         BIT_ULL(POWER_DOMAIN_AUX_IO_A) |                \
2517         BIT_ULL(POWER_DOMAIN_INIT))
2518 #define CNL_DISPLAY_AUX_B_POWER_DOMAINS (               \
2519         BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
2520         BIT_ULL(POWER_DOMAIN_INIT))
2521 #define CNL_DISPLAY_AUX_C_POWER_DOMAINS (               \
2522         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2523         BIT_ULL(POWER_DOMAIN_INIT))
2524 #define CNL_DISPLAY_AUX_D_POWER_DOMAINS (               \
2525         BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
2526         BIT_ULL(POWER_DOMAIN_INIT))
2527 #define CNL_DISPLAY_AUX_F_POWER_DOMAINS (               \
2528         BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
2529         BIT_ULL(POWER_DOMAIN_INIT))
2530 #define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (            \
2531         BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |           \
2532         BIT_ULL(POWER_DOMAIN_INIT))
2533 #define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (              \
2534         CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
2535         BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
2536         BIT_ULL(POWER_DOMAIN_MODESET) |                 \
2537         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2538         BIT_ULL(POWER_DOMAIN_INIT))
2539
2540 /*
2541  * ICL PW_0/PG_0 domains (HW/DMC control):
2542  * - PCI
2543  * - clocks except port PLL
2544  * - central power except FBC
2545  * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2546  * ICL PW_1/PG_1 domains (HW/DMC control):
2547  * - DBUF function
2548  * - PIPE_A and its planes, except VGA
2549  * - transcoder EDP + PSR
2550  * - transcoder DSI
2551  * - DDI_A
2552  * - FBC
2553  */
2554 #define ICL_PW_4_POWER_DOMAINS (                        \
2555         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2556         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |     \
2557         BIT_ULL(POWER_DOMAIN_INIT))
2558         /* VDSC/joining */
2559 #define ICL_PW_3_POWER_DOMAINS (                        \
2560         ICL_PW_4_POWER_DOMAINS |                        \
2561         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2562         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2563         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2564         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2565         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
2566         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
2567         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2568         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
2569         BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |        \
2570         BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |        \
2571         BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
2572         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2573         BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
2574         BIT_ULL(POWER_DOMAIN_AUX_E) |                   \
2575         BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
2576         BIT_ULL(POWER_DOMAIN_AUX_C_TBT) |               \
2577         BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |               \
2578         BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |               \
2579         BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |               \
2580         BIT_ULL(POWER_DOMAIN_VGA) |                     \
2581         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2582         BIT_ULL(POWER_DOMAIN_INIT))
2583         /*
2584          * - transcoder WD
2585          * - KVMR (HW control)
2586          */
2587 #define ICL_PW_2_POWER_DOMAINS (                        \
2588         ICL_PW_3_POWER_DOMAINS |                        \
2589         BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |             \
2590         BIT_ULL(POWER_DOMAIN_INIT))
2591         /*
2592          * - KVMR (HW control)
2593          */
2594 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (              \
2595         ICL_PW_2_POWER_DOMAINS |                        \
2596         BIT_ULL(POWER_DOMAIN_MODESET) |                 \
2597         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2598         BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) |                     \
2599         BIT_ULL(POWER_DOMAIN_INIT))
2600
2601 #define ICL_DDI_IO_A_POWER_DOMAINS (                    \
2602         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2603 #define ICL_DDI_IO_B_POWER_DOMAINS (                    \
2604         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2605 #define ICL_DDI_IO_C_POWER_DOMAINS (                    \
2606         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2607 #define ICL_DDI_IO_D_POWER_DOMAINS (                    \
2608         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2609 #define ICL_DDI_IO_E_POWER_DOMAINS (                    \
2610         BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2611 #define ICL_DDI_IO_F_POWER_DOMAINS (                    \
2612         BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2613
2614 #define ICL_AUX_A_IO_POWER_DOMAINS (                    \
2615         BIT_ULL(POWER_DOMAIN_AUX_IO_A) |                \
2616         BIT_ULL(POWER_DOMAIN_AUX_A))
2617 #define ICL_AUX_B_IO_POWER_DOMAINS (                    \
2618         BIT_ULL(POWER_DOMAIN_AUX_B))
2619 #define ICL_AUX_C_TC1_IO_POWER_DOMAINS (                \
2620         BIT_ULL(POWER_DOMAIN_AUX_C))
2621 #define ICL_AUX_D_TC2_IO_POWER_DOMAINS (                \
2622         BIT_ULL(POWER_DOMAIN_AUX_D))
2623 #define ICL_AUX_E_TC3_IO_POWER_DOMAINS (                \
2624         BIT_ULL(POWER_DOMAIN_AUX_E))
2625 #define ICL_AUX_F_TC4_IO_POWER_DOMAINS (                \
2626         BIT_ULL(POWER_DOMAIN_AUX_F))
2627 #define ICL_AUX_C_TBT1_IO_POWER_DOMAINS (               \
2628         BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
2629 #define ICL_AUX_D_TBT2_IO_POWER_DOMAINS (               \
2630         BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2631 #define ICL_AUX_E_TBT3_IO_POWER_DOMAINS (               \
2632         BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2633 #define ICL_AUX_F_TBT4_IO_POWER_DOMAINS (               \
2634         BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2635
2636 #define TGL_PW_5_POWER_DOMAINS (                        \
2637         BIT_ULL(POWER_DOMAIN_PIPE_D) |                  \
2638         BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |            \
2639         BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) |     \
2640         BIT_ULL(POWER_DOMAIN_INIT))
2641
2642 #define TGL_PW_4_POWER_DOMAINS (                        \
2643         TGL_PW_5_POWER_DOMAINS |                        \
2644         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2645         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2646         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |     \
2647         BIT_ULL(POWER_DOMAIN_INIT))
2648
2649 #define TGL_PW_3_POWER_DOMAINS (                        \
2650         TGL_PW_4_POWER_DOMAINS |                        \
2651         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2652         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2653         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
2654         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
2655         BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |        \
2656         BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |        \
2657         BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) |        \
2658         BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) |        \
2659         BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) |        \
2660         BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
2661         BIT_ULL(POWER_DOMAIN_AUX_E) |                   \
2662         BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
2663         BIT_ULL(POWER_DOMAIN_AUX_G) |                   \
2664         BIT_ULL(POWER_DOMAIN_AUX_H) |                   \
2665         BIT_ULL(POWER_DOMAIN_AUX_I) |                   \
2666         BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |               \
2667         BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |               \
2668         BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |               \
2669         BIT_ULL(POWER_DOMAIN_AUX_G_TBT) |               \
2670         BIT_ULL(POWER_DOMAIN_AUX_H_TBT) |               \
2671         BIT_ULL(POWER_DOMAIN_AUX_I_TBT) |               \
2672         BIT_ULL(POWER_DOMAIN_VGA) |                     \
2673         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2674         BIT_ULL(POWER_DOMAIN_INIT))
2675
2676 #define TGL_PW_2_POWER_DOMAINS (                        \
2677         TGL_PW_3_POWER_DOMAINS |                        \
2678         BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |     \
2679         BIT_ULL(POWER_DOMAIN_INIT))
2680
2681 #define TGL_DISPLAY_DC_OFF_POWER_DOMAINS (              \
2682         TGL_PW_2_POWER_DOMAINS |                        \
2683         BIT_ULL(POWER_DOMAIN_MODESET) |                 \
2684         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2685         BIT_ULL(POWER_DOMAIN_INIT))
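/*
 * Note how the TGL masks above nest: TGL_PW_5_POWER_DOMAINS is folded into
 * TGL_PW_4_POWER_DOMAINS, which is folded into TGL_PW_3_POWER_DOMAINS, and
 * so on up to TGL_DISPLAY_DC_OFF_POWER_DOMAINS.  A domain that needs one of
 * the inner wells (PIPE_D, for instance) therefore appears in every outer
 * mask as well, so taking a reference on that domain powers up the whole
 * chain of wells it sits behind.
 */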
2686
2687 #define TGL_DDI_IO_D_TC1_POWER_DOMAINS (        \
2688         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2689 #define TGL_DDI_IO_E_TC2_POWER_DOMAINS (        \
2690         BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2691 #define TGL_DDI_IO_F_TC3_POWER_DOMAINS (        \
2692         BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2693 #define TGL_DDI_IO_G_TC4_POWER_DOMAINS (        \
2694         BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
2695 #define TGL_DDI_IO_H_TC5_POWER_DOMAINS (        \
2696         BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
2697 #define TGL_DDI_IO_I_TC6_POWER_DOMAINS (        \
2698         BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))
2699
2700 #define TGL_AUX_A_IO_POWER_DOMAINS (            \
2701         BIT_ULL(POWER_DOMAIN_AUX_IO_A) |        \
2702         BIT_ULL(POWER_DOMAIN_AUX_A))
2703 #define TGL_AUX_B_IO_POWER_DOMAINS (            \
2704         BIT_ULL(POWER_DOMAIN_AUX_B))
2705 #define TGL_AUX_C_IO_POWER_DOMAINS (            \
2706         BIT_ULL(POWER_DOMAIN_AUX_C))
2707 #define TGL_AUX_D_TC1_IO_POWER_DOMAINS (        \
2708         BIT_ULL(POWER_DOMAIN_AUX_D))
2709 #define TGL_AUX_E_TC2_IO_POWER_DOMAINS (        \
2710         BIT_ULL(POWER_DOMAIN_AUX_E))
2711 #define TGL_AUX_F_TC3_IO_POWER_DOMAINS (        \
2712         BIT_ULL(POWER_DOMAIN_AUX_F))
2713 #define TGL_AUX_G_TC4_IO_POWER_DOMAINS (        \
2714         BIT_ULL(POWER_DOMAIN_AUX_G))
2715 #define TGL_AUX_H_TC5_IO_POWER_DOMAINS (        \
2716         BIT_ULL(POWER_DOMAIN_AUX_H))
2717 #define TGL_AUX_I_TC6_IO_POWER_DOMAINS (        \
2718         BIT_ULL(POWER_DOMAIN_AUX_I))
2719 #define TGL_AUX_D_TBT1_IO_POWER_DOMAINS (       \
2720         BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2721 #define TGL_AUX_E_TBT2_IO_POWER_DOMAINS (       \
2722         BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2723 #define TGL_AUX_F_TBT3_IO_POWER_DOMAINS (       \
2724         BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2725 #define TGL_AUX_G_TBT4_IO_POWER_DOMAINS (       \
2726         BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
2727 #define TGL_AUX_H_TBT5_IO_POWER_DOMAINS (       \
2728         BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
2729 #define TGL_AUX_I_TBT6_IO_POWER_DOMAINS (       \
2730         BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
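/*
 * All of the *_POWER_DOMAINS masks above are plain u64 bitmasks, one
 * BIT_ULL() per enum intel_display_power_domain.  A minimal sketch of how
 * such a mask gets consulted (illustrative only -- well_covers_domain() is
 * not a helper defined in this file):
 *
 *	static bool well_covers_domain(const struct i915_power_well_desc *desc,
 *				       enum intel_display_power_domain domain)
 *	{
 *		return desc->domains & BIT_ULL(domain);
 *	}
 */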
2731
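/*
 * Every power well variant supplies an ops vtable: ->sync_hw() is called at
 * init time to reconcile the well with whatever state it was left in,
 * ->enable()/->disable() actually toggle it, and ->is_enabled() reads back
 * the current HW state.  The always-on variants below use no-op
 * enable/disable callbacks since there is nothing to toggle.
 */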
2732 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
2733         .sync_hw = i9xx_power_well_sync_hw_noop,
2734         .enable = i9xx_always_on_power_well_noop,
2735         .disable = i9xx_always_on_power_well_noop,
2736         .is_enabled = i9xx_always_on_power_well_enabled,
2737 };
2738
2739 static const struct i915_power_well_ops chv_pipe_power_well_ops = {
2740         .sync_hw = i9xx_power_well_sync_hw_noop,
2741         .enable = chv_pipe_power_well_enable,
2742         .disable = chv_pipe_power_well_disable,
2743         .is_enabled = chv_pipe_power_well_enabled,
2744 };
2745
2746 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
2747         .sync_hw = i9xx_power_well_sync_hw_noop,
2748         .enable = chv_dpio_cmn_power_well_enable,
2749         .disable = chv_dpio_cmn_power_well_disable,
2750         .is_enabled = vlv_power_well_enabled,
2751 };
2752
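/*
 * The per-platform tables below describe each display power well with a
 * struct i915_power_well_desc: a name, the u64 .domains mask it serves, the
 * .ops vtable that drives it, and an .id (or DISP_PW_ID_NONE) for wells that
 * need to be looked up directly.  The bare inner braces at the end of many
 * entries initialize the platform-specific part of the descriptor (the
 * .hsw/.vlv/.bxt members) positionally.
 */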
2753 static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
2754         {
2755                 .name = "always-on",
2756                 .always_on = true,
2757                 .domains = POWER_DOMAIN_MASK,
2758                 .ops = &i9xx_always_on_power_well_ops,
2759                 .id = DISP_PW_ID_NONE,
2760         },
2761 };
2762
2763 static const struct i915_power_well_ops i830_pipes_power_well_ops = {
2764         .sync_hw = i830_pipes_power_well_sync_hw,
2765         .enable = i830_pipes_power_well_enable,
2766         .disable = i830_pipes_power_well_disable,
2767         .is_enabled = i830_pipes_power_well_enabled,
2768 };
2769
2770 static const struct i915_power_well_desc i830_power_wells[] = {
2771         {
2772                 .name = "always-on",
2773                 .always_on = true,
2774                 .domains = POWER_DOMAIN_MASK,
2775                 .ops = &i9xx_always_on_power_well_ops,
2776                 .id = DISP_PW_ID_NONE,
2777         },
2778         {
2779                 .name = "pipes",
2780                 .domains = I830_PIPES_POWER_DOMAINS,
2781                 .ops = &i830_pipes_power_well_ops,
2782                 .id = DISP_PW_ID_NONE,
2783         },
2784 };
2785
2786 static const struct i915_power_well_ops hsw_power_well_ops = {
2787         .sync_hw = hsw_power_well_sync_hw,
2788         .enable = hsw_power_well_enable,
2789         .disable = hsw_power_well_disable,
2790         .is_enabled = hsw_power_well_enabled,
2791 };
2792
2793 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
2794         .sync_hw = i9xx_power_well_sync_hw_noop,
2795         .enable = gen9_dc_off_power_well_enable,
2796         .disable = gen9_dc_off_power_well_disable,
2797         .is_enabled = gen9_dc_off_power_well_enabled,
2798 };
2799
2800 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
2801         .sync_hw = i9xx_power_well_sync_hw_noop,
2802         .enable = bxt_dpio_cmn_power_well_enable,
2803         .disable = bxt_dpio_cmn_power_well_disable,
2804         .is_enabled = bxt_dpio_cmn_power_well_enabled,
2805 };
2806
2807 static const struct i915_power_well_regs hsw_power_well_regs = {
2808         .bios   = HSW_PWR_WELL_CTL1,
2809         .driver = HSW_PWR_WELL_CTL2,
2810         .kvmr   = HSW_PWR_WELL_CTL3,
2811         .debug  = HSW_PWR_WELL_CTL4,
2812 };
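/*
 * HSW-style wells expose one request register per requester: BIOS, driver,
 * KVMR and debug.  The working assumption (not spelled out in this file) is
 * that the well stays powered while any requester has its bit set; the
 * driver's own requests go through the .driver register.
 */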
2813
2814 static const struct i915_power_well_desc hsw_power_wells[] = {
2815         {
2816                 .name = "always-on",
2817                 .always_on = true,
2818                 .domains = POWER_DOMAIN_MASK,
2819                 .ops = &i9xx_always_on_power_well_ops,
2820                 .id = DISP_PW_ID_NONE,
2821         },
2822         {
2823                 .name = "display",
2824                 .domains = HSW_DISPLAY_POWER_DOMAINS,
2825                 .ops = &hsw_power_well_ops,
2826                 .id = HSW_DISP_PW_GLOBAL,
2827                 {
2828                         .hsw.regs = &hsw_power_well_regs,
2829                         .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2830                         .hsw.has_vga = true,
2831                 },
2832         },
2833 };
2834
2835 static const struct i915_power_well_desc bdw_power_wells[] = {
2836         {
2837                 .name = "always-on",
2838                 .always_on = true,
2839                 .domains = POWER_DOMAIN_MASK,
2840                 .ops = &i9xx_always_on_power_well_ops,
2841                 .id = DISP_PW_ID_NONE,
2842         },
2843         {
2844                 .name = "display",
2845                 .domains = BDW_DISPLAY_POWER_DOMAINS,
2846                 .ops = &hsw_power_well_ops,
2847                 .id = HSW_DISP_PW_GLOBAL,
2848                 {
2849                         .hsw.regs = &hsw_power_well_regs,
2850                         .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2851                         .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2852                         .hsw.has_vga = true,
2853                 },
2854         },
2855 };
2856
2857 static const struct i915_power_well_ops vlv_display_power_well_ops = {
2858         .sync_hw = i9xx_power_well_sync_hw_noop,
2859         .enable = vlv_display_power_well_enable,
2860         .disable = vlv_display_power_well_disable,
2861         .is_enabled = vlv_power_well_enabled,
2862 };
2863
2864 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
2865         .sync_hw = i9xx_power_well_sync_hw_noop,
2866         .enable = vlv_dpio_cmn_power_well_enable,
2867         .disable = vlv_dpio_cmn_power_well_disable,
2868         .is_enabled = vlv_power_well_enabled,
2869 };
2870
2871 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
2872         .sync_hw = i9xx_power_well_sync_hw_noop,
2873         .enable = vlv_power_well_enable,
2874         .disable = vlv_power_well_disable,
2875         .is_enabled = vlv_power_well_enabled,
2876 };
2877
2878 static const struct i915_power_well_desc vlv_power_wells[] = {
2879         {
2880                 .name = "always-on",
2881                 .always_on = true,
2882                 .domains = POWER_DOMAIN_MASK,
2883                 .ops = &i9xx_always_on_power_well_ops,
2884                 .id = DISP_PW_ID_NONE,
2885         },
2886         {
2887                 .name = "display",
2888                 .domains = VLV_DISPLAY_POWER_DOMAINS,
2889                 .ops = &vlv_display_power_well_ops,
2890                 .id = VLV_DISP_PW_DISP2D,
2891                 {
2892                         .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
2893                 },
2894         },
2895         {
2896                 .name = "dpio-tx-b-01",
2897                 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2898                            VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2899                            VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2900                            VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2901                 .ops = &vlv_dpio_power_well_ops,
2902                 .id = DISP_PW_ID_NONE,
2903                 {
2904                         .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
2905                 },
2906         },
2907         {
2908                 .name = "dpio-tx-b-23",
2909                 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2910                            VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2911                            VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2912                            VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2913                 .ops = &vlv_dpio_power_well_ops,
2914                 .id = DISP_PW_ID_NONE,
2915                 {
2916                         .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
2917                 },
2918         },
2919         {
2920                 .name = "dpio-tx-c-01",
2921                 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2922                            VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2923                            VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2924                            VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2925                 .ops = &vlv_dpio_power_well_ops,
2926                 .id = DISP_PW_ID_NONE,
2927                 {
2928                         .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
2929                 },
2930         },
2931         {
2932                 .name = "dpio-tx-c-23",
2933                 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2934                            VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2935                            VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2936                            VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2937                 .ops = &vlv_dpio_power_well_ops,
2938                 .id = DISP_PW_ID_NONE,
2939                 {
2940                         .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
2941                 },
2942         },
2943         {
2944                 .name = "dpio-common",
2945                 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
2946                 .ops = &vlv_dpio_cmn_power_well_ops,
2947                 .id = VLV_DISP_PW_DPIO_CMN_BC,
2948                 {
2949                         .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
2950                 },
2951         },
2952 };
2953
2954 static const struct i915_power_well_desc chv_power_wells[] = {
2955         {
2956                 .name = "always-on",
2957                 .always_on = true,
2958                 .domains = POWER_DOMAIN_MASK,
2959                 .ops = &i9xx_always_on_power_well_ops,
2960                 .id = DISP_PW_ID_NONE,
2961         },
2962         {
2963                 .name = "display",
2964                 /*
2965                  * The pipe A power well is the new disp2d well; pipe B and
2966                  * C power wells don't actually exist, and the pipe A power
2967                  * well is required for any pipe to work.
2968                  */
2969                 .domains = CHV_DISPLAY_POWER_DOMAINS,
2970                 .ops = &chv_pipe_power_well_ops,
2971                 .id = DISP_PW_ID_NONE,
2972         },
2973         {
2974                 .name = "dpio-common-bc",
2975                 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
2976                 .ops = &chv_dpio_cmn_power_well_ops,
2977                 .id = VLV_DISP_PW_DPIO_CMN_BC,
2978                 {
2979                         .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
2980                 },
2981         },
2982         {
2983                 .name = "dpio-common-d",
2984                 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
2985                 .ops = &chv_dpio_cmn_power_well_ops,
2986                 .id = CHV_DISP_PW_DPIO_CMN_D,
2987                 {
2988                         .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
2989                 },
2990         },
2991 };
2992
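/*
 * Look a well up by its stable ID and report its current HW state through
 * the well's ->is_enabled() hook.  A minimal usage sketch (assuming the
 * caller keeps the device awake while querying):
 *
 *	if (intel_display_power_well_is_enabled(dev_priv, SKL_DISP_PW_2))
 *		DRM_DEBUG_KMS("power well 2 is enabled\n");
 */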
2993 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2994                                          enum i915_power_well_id power_well_id)
2995 {
2996         struct i915_power_well *power_well;
2997         bool ret;
2998
2999         power_well = lookup_power_well(dev_priv, power_well_id);
3000         ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
3001
3002         return ret;
3003 }
3004
3005 static const struct i915_power_well_desc skl_power_wells[] = {
3006         {
3007                 .name = "always-on",
3008                 .always_on = true,
3009                 .domains = POWER_DOMAIN_MASK,
3010                 .ops = &i9xx_always_on_power_well_ops,
3011                 .id = DISP_PW_ID_NONE,
3012         },
3013         {
3014                 .name = "power well 1",
3015                 /* Handled by the DMC firmware */
3016                 .always_on = true,
3017                 .domains = 0,
3018                 .ops = &hsw_power_well_ops,
3019                 .id = SKL_DISP_PW_1,
3020                 {
3021                         .hsw.regs = &hsw_power_well_regs,
3022                         .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3023                         .hsw.has_fuses = true,
3024                 },
3025         },
3026         {
3027                 .name = "MISC IO power well",
3028                 /* Handled by the DMC firmware */
3029                 .always_on = true,
3030                 .domains = 0,
3031                 .ops = &hsw_power_well_ops,
3032                 .id = SKL_DISP_PW_MISC_IO,
3033                 {
3034                         .hsw.regs = &hsw_power_well_regs,
3035                         .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
3036                 },
3037         },
3038         {
3039                 .name = "DC off",
3040                 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
3041                 .ops = &gen9_dc_off_power_well_ops,
3042                 .id = SKL_DISP_DC_OFF,
3043         },
3044         {
3045                 .name = "power well 2",
3046                 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3047                 .ops = &hsw_power_well_ops,
3048                 .id = SKL_DISP_PW_2,
3049                 {
3050                         .hsw.regs = &hsw_power_well_regs,
3051                         .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3052                         .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3053                         .hsw.has_vga = true,
3054                         .hsw.has_fuses = true,
3055                 },
3056         },
3057         {
3058                 .name = "DDI A/E IO power well",
3059                 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
3060                 .ops = &hsw_power_well_ops,
3061                 .id = DISP_PW_ID_NONE,
3062                 {
3063                         .hsw.regs = &hsw_power_well_regs,
3064                         .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
3065                 },
3066         },
3067         {
3068                 .name = "DDI B IO power well",
3069                 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3070                 .ops = &hsw_power_well_ops,
3071                 .id = DISP_PW_ID_NONE,
3072                 {
3073                         .hsw.regs = &hsw_power_well_regs,
3074                         .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3075                 },
3076         },
3077         {
3078                 .name = "DDI C IO power well",
3079                 .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3080                 .ops = &hsw_power_well_ops,
3081                 .id = DISP_PW_ID_NONE,
3082                 {
3083                         .hsw.regs = &hsw_power_well_regs,
3084                         .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3085                 },
3086         },
3087         {
3088                 .name = "DDI D IO power well",
3089                 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
3090                 .ops = &hsw_power_well_ops,
3091                 .id = DISP_PW_ID_NONE,
3092                 {
3093                         .hsw.regs = &hsw_power_well_regs,
3094                         .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3095                 },
3096         },
3097 };
3098
3099 static const struct i915_power_well_desc bxt_power_wells[] = {
3100         {
3101                 .name = "always-on",
3102                 .always_on = true,
3103                 .domains = POWER_DOMAIN_MASK,
3104                 .ops = &i9xx_always_on_power_well_ops,
3105                 .id = DISP_PW_ID_NONE,
3106         },
3107         {
3108                 .name = "power well 1",
3109                 /* Handled by the DMC firmware */
3110                 .always_on = true,
3111                 .domains = 0,
3112                 .ops = &hsw_power_well_ops,
3113                 .id = SKL_DISP_PW_1,
3114                 {
3115                         .hsw.regs = &hsw_power_well_regs,
3116                         .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3117                         .hsw.has_fuses = true,
3118                 },
3119         },
3120         {
3121                 .name = "DC off",
3122                 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
3123                 .ops = &gen9_dc_off_power_well_ops,
3124                 .id = SKL_DISP_DC_OFF,
3125         },
3126         {
3127                 .name = "power well 2",
3128                 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3129                 .ops = &hsw_power_well_ops,
3130                 .id = SKL_DISP_PW_2,
3131                 {
3132                         .hsw.regs = &hsw_power_well_regs,
3133                         .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3134                         .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3135                         .hsw.has_vga = true,
3136                         .hsw.has_fuses = true,
3137                 },
3138         },
3139         {
3140                 .name = "dpio-common-a",
3141                 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
3142                 .ops = &bxt_dpio_cmn_power_well_ops,
3143                 .id = BXT_DISP_PW_DPIO_CMN_A,
3144                 {
3145                         .bxt.phy = DPIO_PHY1,
3146                 },
3147         },
3148         {
3149                 .name = "dpio-common-bc",
3150                 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
3151                 .ops = &bxt_dpio_cmn_power_well_ops,
3152                 .id = VLV_DISP_PW_DPIO_CMN_BC,
3153                 {
3154                         .bxt.phy = DPIO_PHY0,
3155                 },
3156         },
3157 };
3158
3159 static const struct i915_power_well_desc glk_power_wells[] = {
3160         {
3161                 .name = "always-on",
3162                 .always_on = true,
3163                 .domains = POWER_DOMAIN_MASK,
3164                 .ops = &i9xx_always_on_power_well_ops,
3165                 .id = DISP_PW_ID_NONE,
3166         },
3167         {
3168                 .name = "power well 1",
3169                 /* Handled by the DMC firmware */
3170                 .always_on = true,
3171                 .domains = 0,
3172                 .ops = &hsw_power_well_ops,
3173                 .id = SKL_DISP_PW_1,
3174                 {
3175                         .hsw.regs = &hsw_power_well_regs,
3176                         .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3177                         .hsw.has_fuses = true,
3178                 },
3179         },
3180         {
3181                 .name = "DC off",
3182                 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
3183                 .ops = &gen9_dc_off_power_well_ops,
3184                 .id = SKL_DISP_DC_OFF,
3185         },
3186         {
3187                 .name = "power well 2",
3188                 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3189                 .ops = &hsw_power_well_ops,
3190                 .id = SKL_DISP_PW_2,
3191                 {
3192                         .hsw.regs = &hsw_power_well_regs,
3193                         .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3194                         .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3195                         .hsw.has_vga = true,
3196                         .hsw.has_fuses = true,
3197                 },
3198         },
3199         {
3200                 .name = "dpio-common-a",
3201                 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
3202                 .ops = &bxt_dpio_cmn_power_well_ops,
3203                 .id = BXT_DISP_PW_DPIO_CMN_A,
3204                 {
3205                         .bxt.phy = DPIO_PHY1,
3206                 },
3207         },
3208         {
3209                 .name = "dpio-common-b",
3210                 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
3211                 .ops = &bxt_dpio_cmn_power_well_ops,
3212                 .id = VLV_DISP_PW_DPIO_CMN_BC,
3213                 {
3214                         .bxt.phy = DPIO_PHY0,
3215                 },
3216         },
3217         {
3218                 .name = "dpio-common-c",
3219                 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
3220                 .ops = &bxt_dpio_cmn_power_well_ops,
3221                 .id = GLK_DISP_PW_DPIO_CMN_C,
3222                 {
3223                         .bxt.phy = DPIO_PHY2,
3224                 },
3225         },
3226         {
3227                 .name = "AUX A",
3228                 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
3229                 .ops = &hsw_power_well_ops,
3230                 .id = DISP_PW_ID_NONE,
3231                 {
3232                         .hsw.regs = &hsw_power_well_regs,
3233                         .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3234                 },
3235         },
3236         {
3237                 .name = "AUX B",
3238                 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
3239                 .ops = &hsw_power_well_ops,
3240                 .id = DISP_PW_ID_NONE,
3241                 {
3242                         .hsw.regs = &hsw_power_well_regs,
3243                         .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3244                 },
3245         },
3246         {
3247                 .name = "AUX C",
3248                 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
3249                 .ops = &hsw_power_well_ops,
3250                 .id = DISP_PW_ID_NONE,
3251                 {
3252                         .hsw.regs = &hsw_power_well_regs,
3253                         .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3254                 },
3255         },
3256         {
3257                 .name = "DDI A IO power well",
3258                 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
3259                 .ops = &hsw_power_well_ops,
3260                 .id = DISP_PW_ID_NONE,
3261                 {
3262                         .hsw.regs = &hsw_power_well_regs,
3263                         .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3264                 },
3265         },
3266         {
3267                 .name = "DDI B IO power well",
3268                 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3269                 .ops = &hsw_power_well_ops,
3270                 .id = DISP_PW_ID_NONE,
3271                 {
3272                         .hsw.regs = &hsw_power_well_regs,
3273                         .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3274                 },
3275         },
3276         {
3277                 .name = "DDI C IO power well",
3278                 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3279                 .ops = &hsw_power_well_ops,
3280                 .id = DISP_PW_ID_NONE,
3281                 {
3282                         .hsw.regs = &hsw_power_well_regs,
3283                         .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3284                 },
3285         },
3286 };
3287
3288 static const struct i915_power_well_desc cnl_power_wells[] = {
3289         {
3290                 .name = "always-on",
3291                 .always_on = true,
3292                 .domains = POWER_DOMAIN_MASK,
3293                 .ops = &i9xx_always_on_power_well_ops,
3294                 .id = DISP_PW_ID_NONE,
3295         },
3296         {
3297                 .name = "power well 1",
3298                 /* Handled by the DMC firmware */
3299                 .always_on = true,
3300                 .domains = 0,
3301                 .ops = &hsw_power_well_ops,
3302                 .id = SKL_DISP_PW_1,
3303                 {
3304                         .hsw.regs = &hsw_power_well_regs,
3305                         .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3306                         .hsw.has_fuses = true,
3307                 },
3308         },
3309         {
3310                 .name = "AUX A",
3311                 .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
3312                 .ops = &hsw_power_well_ops,
3313                 .id = DISP_PW_ID_NONE,
3314                 {
3315                         .hsw.regs = &hsw_power_well_regs,
3316                         .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3317                 },
3318         },
3319         {
3320                 .name = "AUX B",
3321                 .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
3322                 .ops = &hsw_power_well_ops,
3323                 .id = DISP_PW_ID_NONE,
3324                 {
3325                         .hsw.regs = &hsw_power_well_regs,
3326                         .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3327                 },
3328         },
3329         {
3330                 .name = "AUX C",
3331                 .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
3332                 .ops = &hsw_power_well_ops,
3333                 .id = DISP_PW_ID_NONE,
3334                 {
3335                         .hsw.regs = &hsw_power_well_regs,
3336                         .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3337                 },
3338         },
3339         {
3340                 .name = "AUX D",
3341                 .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
3342                 .ops = &hsw_power_well_ops,
3343                 .id = DISP_PW_ID_NONE,
3344                 {
3345                         .hsw.regs = &hsw_power_well_regs,
3346                         .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
3347                 },
3348         },
3349         {
3350                 .name = "DC off",
3351                 .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
3352                 .ops = &gen9_dc_off_power_well_ops,
3353                 .id = SKL_DISP_DC_OFF,
3354         },
3355         {
3356                 .name = "power well 2",
3357                 .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3358                 .ops = &hsw_power_well_ops,
3359                 .id = SKL_DISP_PW_2,
3360                 {
3361                         .hsw.regs = &hsw_power_well_regs,
3362                         .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3363                         .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3364                         .hsw.has_vga = true,
3365                         .hsw.has_fuses = true,
3366                 },
3367         },
3368         {
3369                 .name = "DDI A IO power well",
3370                 .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
3371                 .ops = &hsw_power_well_ops,
3372                 .id = DISP_PW_ID_NONE,
3373                 {
3374                         .hsw.regs = &hsw_power_well_regs,
3375                         .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3376                 },
3377         },
3378         {
3379                 .name = "DDI B IO power well",
3380                 .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
3381                 .ops = &hsw_power_well_ops,
3382                 .id = DISP_PW_ID_NONE,
3383                 {
3384                         .hsw.regs = &hsw_power_well_regs,
3385                         .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3386                 },
3387         },
3388         {
3389                 .name = "DDI C IO power well",
3390                 .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
3391                 .ops = &hsw_power_well_ops,
3392                 .id = DISP_PW_ID_NONE,
3393                 {
3394                         .hsw.regs = &hsw_power_well_regs,
3395                         .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3396                 },
3397         },
3398         {
3399                 .name = "DDI D IO power well",
3400                 .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
3401                 .ops = &hsw_power_well_ops,
3402                 .id = DISP_PW_ID_NONE,
3403                 {
3404                         .hsw.regs = &hsw_power_well_regs,
3405                         .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3406                 },
3407         },
3408         {
3409                 .name = "DDI F IO power well",
3410                 .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
3411                 .ops = &hsw_power_well_ops,
3412                 .id = DISP_PW_ID_NONE,
3413                 {
3414                         .hsw.regs = &hsw_power_well_regs,
3415                         .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
3416                 },
3417         },
3418         {
3419                 .name = "AUX F",
3420                 .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
3421                 .ops = &hsw_power_well_ops,
3422                 .id = DISP_PW_ID_NONE,
3423                 {
3424                         .hsw.regs = &hsw_power_well_regs,
3425                         .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
3426                 },
3427         },
3428 };
3429
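/*
 * From ICL onwards the AUX wells come in two flavours: combo PHY ports reuse
 * the HSW-style sync_hw/is_enabled hooks but get their own enable/disable
 * paths, and Type-C ports likewise get dedicated handlers, with the
 * .hsw.is_tc_tbt flag in the descriptors below marking the TBT-mode AUX
 * wells.  The AUX and DDI IO wells also move to their own request registers,
 * described by icl_aux_power_well_regs and icl_ddi_power_well_regs below.
 */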
3430 static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
3431         .sync_hw = hsw_power_well_sync_hw,
3432         .enable = icl_combo_phy_aux_power_well_enable,
3433         .disable = icl_combo_phy_aux_power_well_disable,
3434         .is_enabled = hsw_power_well_enabled,
3435 };
3436
3437 static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
3438         .sync_hw = hsw_power_well_sync_hw,
3439         .enable = icl_tc_phy_aux_power_well_enable,
3440         .disable = icl_tc_phy_aux_power_well_disable,
3441         .is_enabled = hsw_power_well_enabled,
3442 };
3443
3444 static const struct i915_power_well_regs icl_aux_power_well_regs = {
3445         .bios   = ICL_PWR_WELL_CTL_AUX1,
3446         .driver = ICL_PWR_WELL_CTL_AUX2,
3447         .debug  = ICL_PWR_WELL_CTL_AUX4,
3448 };
3449
3450 static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3451         .bios   = ICL_PWR_WELL_CTL_DDI1,
3452         .driver = ICL_PWR_WELL_CTL_DDI2,
3453         .debug  = ICL_PWR_WELL_CTL_DDI4,
3454 };
3455
3456 static const struct i915_power_well_desc icl_power_wells[] = {
3457         {
3458                 .name = "always-on",
3459                 .always_on = true,
3460                 .domains = POWER_DOMAIN_MASK,
3461                 .ops = &i9xx_always_on_power_well_ops,
3462                 .id = DISP_PW_ID_NONE,
3463         },
3464         {
3465                 .name = "power well 1",
3466                 /* Handled by the DMC firmware */
3467                 .always_on = true,
3468                 .domains = 0,
3469                 .ops = &hsw_power_well_ops,
3470                 .id = SKL_DISP_PW_1,
3471                 {
3472                         .hsw.regs = &hsw_power_well_regs,
3473                         .hsw.idx = ICL_PW_CTL_IDX_PW_1,
3474                         .hsw.has_fuses = true,
3475                 },
3476         },
3477         {
3478                 .name = "DC off",
3479                 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3480                 .ops = &gen9_dc_off_power_well_ops,
3481                 .id = SKL_DISP_DC_OFF,
3482         },
3483         {
3484                 .name = "power well 2",
3485                 .domains = ICL_PW_2_POWER_DOMAINS,
3486                 .ops = &hsw_power_well_ops,
3487                 .id = SKL_DISP_PW_2,
3488                 {
3489                         .hsw.regs = &hsw_power_well_regs,
3490                         .hsw.idx = ICL_PW_CTL_IDX_PW_2,
3491                         .hsw.has_fuses = true,
3492                 },
3493         },
3494         {
3495                 .name = "power well 3",
3496                 .domains = ICL_PW_3_POWER_DOMAINS,
3497                 .ops = &hsw_power_well_ops,
3498                 .id = DISP_PW_ID_NONE,
3499                 {
3500                         .hsw.regs = &hsw_power_well_regs,
3501                         .hsw.idx = ICL_PW_CTL_IDX_PW_3,
3502                         .hsw.irq_pipe_mask = BIT(PIPE_B),
3503                         .hsw.has_vga = true,
3504                         .hsw.has_fuses = true,
3505                 },
3506         },
3507         {
3508                 .name = "DDI A IO",
3509                 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
3510                 .ops = &hsw_power_well_ops,
3511                 .id = DISP_PW_ID_NONE,
3512                 {
3513                         .hsw.regs = &icl_ddi_power_well_regs,
3514                         .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3515                 },
3516         },
3517         {
3518                 .name = "DDI B IO",
3519                 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
3520                 .ops = &hsw_power_well_ops,
3521                 .id = DISP_PW_ID_NONE,
3522                 {
3523                         .hsw.regs = &icl_ddi_power_well_regs,
3524                         .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3525                 },
3526         },
3527         {
3528                 .name = "DDI C IO",
3529                 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
3530                 .ops = &hsw_power_well_ops,
3531                 .id = DISP_PW_ID_NONE,
3532                 {
3533                         .hsw.regs = &icl_ddi_power_well_regs,
3534                         .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3535                 },
3536         },
3537         {
3538                 .name = "DDI D IO",
3539                 .domains = ICL_DDI_IO_D_POWER_DOMAINS,
3540                 .ops = &hsw_power_well_ops,
3541                 .id = DISP_PW_ID_NONE,
3542                 {
3543                         .hsw.regs = &icl_ddi_power_well_regs,
3544                         .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3545                 },
3546         },
3547         {
3548                 .name = "DDI E IO",
3549                 .domains = ICL_DDI_IO_E_POWER_DOMAINS,
3550                 .ops = &hsw_power_well_ops,
3551                 .id = DISP_PW_ID_NONE,
3552                 {
3553                         .hsw.regs = &icl_ddi_power_well_regs,
3554                         .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3555                 },
3556         },
3557         {
3558                 .name = "DDI F IO",
3559                 .domains = ICL_DDI_IO_F_POWER_DOMAINS,
3560                 .ops = &hsw_power_well_ops,
3561                 .id = DISP_PW_ID_NONE,
3562                 {
3563                         .hsw.regs = &icl_ddi_power_well_regs,
3564                         .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
3565                 },
3566         },
3567         {
3568                 .name = "AUX A",
3569                 .domains = ICL_AUX_A_IO_POWER_DOMAINS,
3570                 .ops = &icl_combo_phy_aux_power_well_ops,
3571                 .id = DISP_PW_ID_NONE,
3572                 {
3573                         .hsw.regs = &icl_aux_power_well_regs,
3574                         .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3575                 },
3576         },
3577         {
3578                 .name = "AUX B",
3579                 .domains = ICL_AUX_B_IO_POWER_DOMAINS,
3580                 .ops = &icl_combo_phy_aux_power_well_ops,
3581                 .id = DISP_PW_ID_NONE,
3582                 {
3583                         .hsw.regs = &icl_aux_power_well_regs,
3584                         .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3585                 },
3586         },
3587         {
3588                 .name = "AUX C TC1",
3589                 .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
3590                 .ops = &icl_tc_phy_aux_power_well_ops,
3591                 .id = DISP_PW_ID_NONE,
3592                 {
3593                         .hsw.regs = &icl_aux_power_well_regs,
3594                         .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3595                         .hsw.is_tc_tbt = false,
3596                 },
3597         },
3598         {
3599                 .name = "AUX D TC2",
3600                 .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
3601                 .ops = &icl_tc_phy_aux_power_well_ops,
3602                 .id = DISP_PW_ID_NONE,
3603                 {
3604                         .hsw.regs = &icl_aux_power_well_regs,
3605                         .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
3606                         .hsw.is_tc_tbt = false,
3607                 },
3608         },
3609         {
3610                 .name = "AUX E TC3",
3611                 .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
3612                 .ops = &icl_tc_phy_aux_power_well_ops,
3613                 .id = DISP_PW_ID_NONE,
3614                 {
3615                         .hsw.regs = &icl_aux_power_well_regs,
3616                         .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
3617                         .hsw.is_tc_tbt = false,
3618                 },
3619         },
3620         {
3621                 .name = "AUX F TC4",
3622                 .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
3623                 .ops = &icl_tc_phy_aux_power_well_ops,
3624                 .id = DISP_PW_ID_NONE,
3625                 {
3626                         .hsw.regs = &icl_aux_power_well_regs,
3627                         .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
3628                         .hsw.is_tc_tbt = false,
3629                 },
3630         },
3631         {
3632                 .name = "AUX C TBT1",
3633                 .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
3634                 .ops = &icl_tc_phy_aux_power_well_ops,
3635                 .id = DISP_PW_ID_NONE,
3636                 {
3637                         .hsw.regs = &icl_aux_power_well_regs,
3638                         .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
3639                         .hsw.is_tc_tbt = true,
3640                 },
3641         },
3642         {
3643                 .name = "AUX D TBT2",
3644                 .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
3645                 .ops = &icl_tc_phy_aux_power_well_ops,
3646                 .id = DISP_PW_ID_NONE,
3647                 {
3648                         .hsw.regs = &icl_aux_power_well_regs,
3649                         .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
3650                         .hsw.is_tc_tbt = true,
3651                 },
3652         },
3653         {
3654                 .name = "AUX E TBT3",
3655                 .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
3656                 .ops = &icl_tc_phy_aux_power_well_ops,
3657                 .id = DISP_PW_ID_NONE,
3658                 {
3659                         .hsw.regs = &icl_aux_power_well_regs,
3660                         .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
3661                         .hsw.is_tc_tbt = true,
3662                 },
3663         },
3664         {
3665                 .name = "AUX F TBT4",
3666                 .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
3667                 .ops = &icl_tc_phy_aux_power_well_ops,
3668                 .id = DISP_PW_ID_NONE,
3669                 {
3670                         .hsw.regs = &icl_aux_power_well_regs,
3671                         .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
3672                         .hsw.is_tc_tbt = true,
3673                 },
3674         },
3675         {
3676                 .name = "power well 4",
3677                 .domains = ICL_PW_4_POWER_DOMAINS,
3678                 .ops = &hsw_power_well_ops,
3679                 .id = DISP_PW_ID_NONE,
3680                 {
3681                         .hsw.regs = &hsw_power_well_regs,
3682                         .hsw.idx = ICL_PW_CTL_IDX_PW_4,
3683                         .hsw.has_fuses = true,
3684                         .hsw.irq_pipe_mask = BIT(PIPE_C),
3685                 },
3686         },
3687 };
3688
3689 static const struct i915_power_well_desc tgl_power_wells[] = {
3690         {
3691                 .name = "always-on",
3692                 .always_on = true,
3693                 .domains = POWER_DOMAIN_MASK,
3694                 .ops = &i9xx_always_on_power_well_ops,
3695                 .id = DISP_PW_ID_NONE,
3696         },
3697         {
3698                 .name = "power well 1",
3699                 /* Handled by the DMC firmware */
3700                 .always_on = true,
3701                 .domains = 0,
3702                 .ops = &hsw_power_well_ops,
3703                 .id = SKL_DISP_PW_1,
3704                 {
3705                         .hsw.regs = &hsw_power_well_regs,
3706                         .hsw.idx = ICL_PW_CTL_IDX_PW_1,
3707                         .hsw.has_fuses = true,
3708                 },
3709         },
3710         {
3711                 .name = "DC off",
3712                 .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
3713                 .ops = &gen9_dc_off_power_well_ops,
3714                 .id = SKL_DISP_DC_OFF,
3715         },
3716         {
3717                 .name = "power well 2",
3718                 .domains = TGL_PW_2_POWER_DOMAINS,
3719                 .ops = &hsw_power_well_ops,
3720                 .id = SKL_DISP_PW_2,
3721                 {
3722                         .hsw.regs = &hsw_power_well_regs,
3723                         .hsw.idx = ICL_PW_CTL_IDX_PW_2,
3724                         .hsw.has_fuses = true,
3725                 },
3726         },
3727         {
3728                 .name = "power well 3",
3729                 .domains = TGL_PW_3_POWER_DOMAINS,
3730                 .ops = &hsw_power_well_ops,
3731                 .id = DISP_PW_ID_NONE,
3732                 {
3733                         .hsw.regs = &hsw_power_well_regs,
3734                         .hsw.idx = ICL_PW_CTL_IDX_PW_3,
3735                         .hsw.irq_pipe_mask = BIT(PIPE_B),
3736                         .hsw.has_vga = true,
3737                         .hsw.has_fuses = true,
3738                 },
3739         },
3740         {
3741                 .name = "DDI A IO",
3742                 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
3743                 .ops = &hsw_power_well_ops,
3744                 .id = DISP_PW_ID_NONE,
3745                 {
3746                         .hsw.regs = &icl_ddi_power_well_regs,
3747                         .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3748                 }
3749         },
3750         {
3751                 .name = "DDI B IO",
3752                 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
3753                 .ops = &hsw_power_well_ops,
3754                 .id = DISP_PW_ID_NONE,
3755                 {
3756                         .hsw.regs = &icl_ddi_power_well_regs,
3757                         .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3758                 }
3759         },
3760         {
3761                 .name = "DDI C IO",
3762                 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
3763                 .ops = &hsw_power_well_ops,
3764                 .id = DISP_PW_ID_NONE,
3765                 {
3766                         .hsw.regs = &icl_ddi_power_well_regs,
3767                         .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3768                 }
3769         },
3770         {
3771                 .name = "DDI D TC1 IO",
3772                 .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
3773                 .ops = &hsw_power_well_ops,
3774                 .id = DISP_PW_ID_NONE,
3775                 {
3776                         .hsw.regs = &icl_ddi_power_well_regs,
3777                         .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
3778                 },
3779         },
3780         {
3781                 .name = "DDI E TC2 IO",
3782                 .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
3783                 .ops = &hsw_power_well_ops,
3784                 .id = DISP_PW_ID_NONE,
3785                 {
3786                         .hsw.regs = &icl_ddi_power_well_regs,
3787                         .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
3788                 },
3789         },
3790         {
3791                 .name = "DDI F TC3 IO",
3792                 .domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS,
3793                 .ops = &hsw_power_well_ops,
3794                 .id = DISP_PW_ID_NONE,
3795                 {
3796                         .hsw.regs = &icl_ddi_power_well_regs,
3797                         .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
3798                 },
3799         },
3800         {
3801                 .name = "DDI G TC4 IO",
3802                 .domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS,
3803                 .ops = &hsw_power_well_ops,
3804                 .id = DISP_PW_ID_NONE,
3805                 {
3806                         .hsw.regs = &icl_ddi_power_well_regs,
3807                         .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
3808                 },
3809         },
3810         {
3811                 .name = "DDI H TC5 IO",
3812                 .domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS,
3813                 .ops = &hsw_power_well_ops,
3814                 .id = DISP_PW_ID_NONE,
3815                 {
3816                         .hsw.regs = &icl_ddi_power_well_regs,
3817                         .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
3818                 },
3819         },
3820         {
3821                 .name = "DDI I TC6 IO",
3822                 .domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS,
3823                 .ops = &hsw_power_well_ops,
3824                 .id = DISP_PW_ID_NONE,
3825                 {
3826                         .hsw.regs = &icl_ddi_power_well_regs,
3827                         .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
3828                 },
3829         },
3830         {
3831                 .name = "AUX A",
3832                 .domains = TGL_AUX_A_IO_POWER_DOMAINS,
3833                 .ops = &icl_combo_phy_aux_power_well_ops,
3834                 .id = DISP_PW_ID_NONE,
3835                 {
3836                         .hsw.regs = &icl_aux_power_well_regs,
3837                         .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3838                 },
3839         },
3840         {
3841                 .name = "AUX B",
3842                 .domains = TGL_AUX_B_IO_POWER_DOMAINS,
3843                 .ops = &icl_combo_phy_aux_power_well_ops,
3844                 .id = DISP_PW_ID_NONE,
3845                 {
3846                         .hsw.regs = &icl_aux_power_well_regs,
3847                         .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3848                 },
3849         },
3850         {
3851                 .name = "AUX C",
3852                 .domains = TGL_AUX_C_IO_POWER_DOMAINS,
3853                 .ops = &icl_combo_phy_aux_power_well_ops,
3854                 .id = DISP_PW_ID_NONE,
3855                 {
3856                         .hsw.regs = &icl_aux_power_well_regs,
3857                         .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3858                 },
3859         },
3860         {
3861                 .name = "AUX D TC1",
3862                 .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
3863                 .ops = &icl_tc_phy_aux_power_well_ops,
3864                 .id = DISP_PW_ID_NONE,
3865                 {
3866                         .hsw.regs = &icl_aux_power_well_regs,
3867                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
3868                         .hsw.is_tc_tbt = false,
3869                 },
3870         },
3871         {
3872                 .name = "AUX E TC2",
3873                 .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
3874                 .ops = &icl_tc_phy_aux_power_well_ops,
3875                 .id = DISP_PW_ID_NONE,
3876                 {
3877                         .hsw.regs = &icl_aux_power_well_regs,
3878                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
3879                         .hsw.is_tc_tbt = false,
3880                 },
3881         },
3882         {
3883                 .name = "AUX F TC3",
3884                 .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
3885                 .ops = &icl_tc_phy_aux_power_well_ops,
3886                 .id = DISP_PW_ID_NONE,
3887                 {
3888                         .hsw.regs = &icl_aux_power_well_regs,
3889                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
3890                         .hsw.is_tc_tbt = false,
3891                 },
3892         },
3893         {
3894                 .name = "AUX G TC4",
3895                 .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
3896                 .ops = &icl_tc_phy_aux_power_well_ops,
3897                 .id = DISP_PW_ID_NONE,
3898                 {
3899                         .hsw.regs = &icl_aux_power_well_regs,
3900                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
3901                         .hsw.is_tc_tbt = false,
3902                 },
3903         },
3904         {
3905                 .name = "AUX H TC5",
3906                 .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
3907                 .ops = &icl_tc_phy_aux_power_well_ops,
3908                 .id = DISP_PW_ID_NONE,
3909                 {
3910                         .hsw.regs = &icl_aux_power_well_regs,
3911                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
3912                         .hsw.is_tc_tbt = false,
3913                 },
3914         },
3915         {
3916                 .name = "AUX I TC6",
3917                 .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
3918                 .ops = &icl_tc_phy_aux_power_well_ops,
3919                 .id = DISP_PW_ID_NONE,
3920                 {
3921                         .hsw.regs = &icl_aux_power_well_regs,
3922                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
3923                         .hsw.is_tc_tbt = false,
3924                 },
3925         },
3926         {
3927                 .name = "AUX D TBT1",
3928                 .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
3929                 .ops = &hsw_power_well_ops,
3930                 .id = DISP_PW_ID_NONE,
3931                 {
3932                         .hsw.regs = &icl_aux_power_well_regs,
3933                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
3934                         .hsw.is_tc_tbt = true,
3935                 },
3936         },
3937         {
3938                 .name = "AUX E TBT2",
3939                 .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
3940                 .ops = &hsw_power_well_ops,
3941                 .id = DISP_PW_ID_NONE,
3942                 {
3943                         .hsw.regs = &icl_aux_power_well_regs,
3944                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
3945                         .hsw.is_tc_tbt = true,
3946                 },
3947         },
3948         {
3949                 .name = "AUX F TBT3",
3950                 .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
3951                 .ops = &hsw_power_well_ops,
3952                 .id = DISP_PW_ID_NONE,
3953                 {
3954                         .hsw.regs = &icl_aux_power_well_regs,
3955                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
3956                         .hsw.is_tc_tbt = true,
3957                 },
3958         },
3959         {
3960                 .name = "AUX G TBT4",
3961                 .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
3962                 .ops = &hsw_power_well_ops,
3963                 .id = DISP_PW_ID_NONE,
3964                 {
3965                         .hsw.regs = &icl_aux_power_well_regs,
3966                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
3967                         .hsw.is_tc_tbt = true,
3968                 },
3969         },
3970         {
3971                 .name = "AUX H TBT5",
3972                 .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
3973                 .ops = &hsw_power_well_ops,
3974                 .id = DISP_PW_ID_NONE,
3975                 {
3976                         .hsw.regs = &icl_aux_power_well_regs,
3977                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
3978                         .hsw.is_tc_tbt = true,
3979                 },
3980         },
3981         {
3982                 .name = "AUX I TBT6",
3983                 .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
3984                 .ops = &hsw_power_well_ops,
3985                 .id = DISP_PW_ID_NONE,
3986                 {
3987                         .hsw.regs = &icl_aux_power_well_regs,
3988                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
3989                         .hsw.is_tc_tbt = true,
3990                 },
3991         },
3992         {
3993                 .name = "power well 4",
3994                 .domains = TGL_PW_4_POWER_DOMAINS,
3995                 .ops = &hsw_power_well_ops,
3996                 .id = DISP_PW_ID_NONE,
3997                 {
3998                         .hsw.regs = &hsw_power_well_regs,
3999                         .hsw.idx = ICL_PW_CTL_IDX_PW_4,
4000                         .hsw.has_fuses = true,
4001                         .hsw.irq_pipe_mask = BIT(PIPE_C),
4002                 }
4003         },
4004         {
4005                 .name = "power well 5",
4006                 .domains = TGL_PW_5_POWER_DOMAINS,
4007                 .ops = &hsw_power_well_ops,
4008                 .id = DISP_PW_ID_NONE,
4009                 {
4010                         .hsw.regs = &hsw_power_well_regs,
4011                         .hsw.idx = TGL_PW_CTL_IDX_PW_5,
4012                         .hsw.has_fuses = true,
4013                         .hsw.irq_pipe_mask = BIT(PIPE_D),
4014                 },
4015         },
4016 };
4017
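/*
 * Normalize the disable_power_well option: an explicit non-negative value is
 * clamped to 0/1, while the "auto" (negative) value defaults to 1, i.e.
 * unused power wells may be turned off.
 */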
4018 static int
4019 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
4020                                    int disable_power_well)
4021 {
4022         if (disable_power_well >= 0)
4023                 return !!disable_power_well;
4024
4025         return 1;
4026 }
4027
4028 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
4029                                int enable_dc)
4030 {
4031         u32 mask;
4032         int requested_dc;
4033         int max_dc;
4034
4035         if (INTEL_GEN(dev_priv) >= 12) {
4036                 max_dc = 4;
4037                 /*
4038                  * DC9 has a separate HW flow from the rest of the DC states,
4039                  * not depending on the DMC firmware. It's needed by system
4040                  * suspend/resume, so allow it unconditionally.
4041                  */
4042                 mask = DC_STATE_EN_DC9;
4043         } else if (IS_GEN(dev_priv, 11)) {
4044                 max_dc = 2;
4045                 mask = DC_STATE_EN_DC9;
4046         } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
4047                 max_dc = 2;
4048                 mask = 0;
4049         } else if (IS_GEN9_LP(dev_priv)) {
4050                 max_dc = 1;
4051                 mask = DC_STATE_EN_DC9;
4052         } else {
4053                 max_dc = 0;
4054                 mask = 0;
4055         }
4056
4057         if (!i915_modparams.disable_power_well)
4058                 max_dc = 0;
4059
4060         if (enable_dc >= 0 && enable_dc <= max_dc) {
4061                 requested_dc = enable_dc;
4062         } else if (enable_dc == -1) {
4063                 requested_dc = max_dc;
4064         } else if (enable_dc > max_dc && enable_dc <= 4) {
4065                 DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
4066                               enable_dc, max_dc);
4067                 requested_dc = max_dc;
4068         } else {
4069                 DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
4070                 requested_dc = max_dc;
4071         }
4072
4073         switch (requested_dc) {
4074         case 4:
4075                 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
4076                 break;
4077         case 3:
4078                 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
4079                 break;
4080         case 2:
4081                 mask |= DC_STATE_EN_UPTO_DC6;
4082                 break;
4083         case 1:
4084                 mask |= DC_STATE_EN_UPTO_DC5;
4085                 break;
4086         }
4087
4088         DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
4089
4090         return mask;
4091 }
4092
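/*
 * Allocate the power well array for the given descriptor table and sanity
 * check that every non-NONE power well ID fits in the 64 bit tracking mask
 * and is used at most once.
 */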
4093 static int
4094 __set_power_wells(struct i915_power_domains *power_domains,
4095                   const struct i915_power_well_desc *power_well_descs,
4096                   int power_well_count)
4097 {
4098         u64 power_well_ids = 0;
4099         int i;
4100
4101         power_domains->power_well_count = power_well_count;
4102         power_domains->power_wells =
4103                                 kcalloc(power_well_count,
4104                                         sizeof(*power_domains->power_wells),
4105                                         GFP_KERNEL);
4106         if (!power_domains->power_wells)
4107                 return -ENOMEM;
4108
4109         for (i = 0; i < power_well_count; i++) {
4110                 enum i915_power_well_id id = power_well_descs[i].id;
4111
4112                 power_domains->power_wells[i].desc = &power_well_descs[i];
4113
4114                 if (id == DISP_PW_ID_NONE)
4115                         continue;
4116
4117                 WARN_ON(id >= sizeof(power_well_ids) * 8);
4118                 WARN_ON(power_well_ids & BIT_ULL(id));
4119                 power_well_ids |= BIT_ULL(id);
4120         }
4121
4122         return 0;
4123 }
4124
4125 #define set_power_wells(power_domains, __power_well_descs) \
4126         __set_power_wells(power_domains, __power_well_descs, \
4127                           ARRAY_SIZE(__power_well_descs))
4128
4129 /**
4130  * intel_power_domains_init - initializes the power domain structures
4131  * @dev_priv: i915 device instance
4132  *
4133  * Initializes the power domain structures for @dev_priv depending upon the
4134  * supported platform.
4135  */
4136 int intel_power_domains_init(struct drm_i915_private *dev_priv)
4137 {
4138         struct i915_power_domains *power_domains = &dev_priv->power_domains;
4139         int err;
4140
4141         i915_modparams.disable_power_well =
4142                 sanitize_disable_power_well_option(dev_priv,
4143                                                    i915_modparams.disable_power_well);
4144         dev_priv->csr.allowed_dc_mask =
4145                 get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
4146
4147         dev_priv->csr.target_dc_state =
4148                 sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
4149
4150         BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
4151
4152         mutex_init(&power_domains->lock);
4153
4154         INIT_DELAYED_WORK(&power_domains->async_put_work,
4155                           intel_display_power_put_async_work);
4156
4157         /*
4158          * The enabling order will be from lower to higher indexed wells;
4159          * the disabling order is reversed.
4160          */
4161         if (IS_GEN(dev_priv, 12)) {
4162                 err = set_power_wells(power_domains, tgl_power_wells);
4163         } else if (IS_GEN(dev_priv, 11)) {
4164                 err = set_power_wells(power_domains, icl_power_wells);
4165         } else if (IS_CANNONLAKE(dev_priv)) {
4166                 err = set_power_wells(power_domains, cnl_power_wells);
4167
4168                 /*
4169                  * DDI and AUX IO power wells get enabled for all ports
4170                  * regardless of their presence or use. So, in order to
4171                  * avoid timeouts, let's remove them from the list for
4172                  * the SKUs without port F.
4173                  */
4174                 if (!IS_CNL_WITH_PORT_F(dev_priv))
4175                         power_domains->power_well_count -= 2;
4176         } else if (IS_GEMINILAKE(dev_priv)) {
4177                 err = set_power_wells(power_domains, glk_power_wells);
4178         } else if (IS_BROXTON(dev_priv)) {
4179                 err = set_power_wells(power_domains, bxt_power_wells);
4180         } else if (IS_GEN9_BC(dev_priv)) {
4181                 err = set_power_wells(power_domains, skl_power_wells);
4182         } else if (IS_CHERRYVIEW(dev_priv)) {
4183                 err = set_power_wells(power_domains, chv_power_wells);
4184         } else if (IS_BROADWELL(dev_priv)) {
4185                 err = set_power_wells(power_domains, bdw_power_wells);
4186         } else if (IS_HASWELL(dev_priv)) {
4187                 err = set_power_wells(power_domains, hsw_power_wells);
4188         } else if (IS_VALLEYVIEW(dev_priv)) {
4189                 err = set_power_wells(power_domains, vlv_power_wells);
4190         } else if (IS_I830(dev_priv)) {
4191                 err = set_power_wells(power_domains, i830_power_wells);
4192         } else {
4193                 err = set_power_wells(power_domains, i9xx_always_on_power_well);
4194         }
4195
4196         return err;
4197 }
4198
4199 /**
4200  * intel_power_domains_cleanup - clean up power domains resources
4201  * @dev_priv: i915 device instance
4202  *
4203  * Release any resources acquired by intel_power_domains_init()
4204  */
4205 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
4206 {
4207         kfree(dev_priv->power_domains.power_wells);
4208 }
4209
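/*
 * Let each power well's ->sync_hw() hook reconcile its state with the
 * hardware and refresh the cached hw_enabled flag.
 */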
4210 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
4211 {
4212         struct i915_power_domains *power_domains = &dev_priv->power_domains;
4213         struct i915_power_well *power_well;
4214
4215         mutex_lock(&power_domains->lock);
4216         for_each_power_well(dev_priv, power_well) {
4217                 power_well->desc->ops->sync_hw(dev_priv, power_well);
4218                 power_well->hw_enabled =
4219                         power_well->desc->ops->is_enabled(dev_priv, power_well);
4220         }
4221         mutex_unlock(&power_domains->lock);
4222 }
4223
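/*
 * Request the DBUF slice behind @reg to power up or down and check its power
 * state after the 10 us settling delay. Returns true if the slice reached the
 * requested state.
 */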
4224 static inline
4225 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
4226                           i915_reg_t reg, bool enable)
4227 {
4228         u32 val, status;
4229
4230         val = I915_READ(reg);
4231         val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
4232         I915_WRITE(reg, val);
4233         POSTING_READ(reg);
4234         udelay(10);
4235
4236         status = I915_READ(reg) & DBUF_POWER_STATE;
4237         if ((enable && !status) || (!enable && status)) {
4238                 DRM_ERROR("DBuf power %s timeout!\n",
4239                           enable ? "enable" : "disable");
4240                 return false;
4241         }
4242         return true;
4243 }
4244
4245 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
4246 {
4247         intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
4248 }
4249
4250 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
4251 {
4252         intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
4253 }
4254
4255 static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
4256 {
4257         if (INTEL_GEN(dev_priv) < 11)
4258                 return 1;
4259         return 2;
4260 }
4261
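/*
 * Adjust the number of enabled DBUF slices to @req_slices by toggling only
 * the second slice (DBUF_CTL_S2); the first slice is expected to remain
 * enabled (see icl_dbuf_enable()).
 */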
4262 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
4263                             u8 req_slices)
4264 {
4265         const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
4266         bool ret;
4267
4268         if (req_slices > intel_dbuf_max_slices(dev_priv)) {
4269                 DRM_ERROR("Invalid number of dbuf slices requested\n");
4270                 return;
4271         }
4272
4273         if (req_slices == hw_enabled_slices || req_slices == 0)
4274                 return;
4275
4276         if (req_slices > hw_enabled_slices)
4277                 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
4278         else
4279                 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
4280
4281         if (ret)
4282                 dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
4283 }
4284
4285 static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
4286 {
4287         I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
4288         I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
4289         POSTING_READ(DBUF_CTL_S2);
4290
4291         udelay(10);
4292
4293         if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
4294             !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
4295                 DRM_ERROR("DBuf power enable timeout\n");
4296         else
4297                 /*
4298                  * FIXME: for now pretend that we only have 1 slice, see
4299                  * intel_enabled_dbuf_slices_num().
4300                  */
4301                 dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
4302 }
4303
4304 static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
4305 {
4306         I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
4307         I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
4308         POSTING_READ(DBUF_CTL_S2);
4309
4310         udelay(10);
4311
4312         if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
4313             (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
4314                 DRM_ERROR("DBuf power disable timeout!\n");
4315         else
4316                 /*
4317                  * FIXME: for now pretend that the first slice is always
4318                  * enabled, see intel_enabled_dbuf_slices_num().
4319                  */
4320                 dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
4321 }
4322
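/* Program the initial MBus ABOX credit values. */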
4323 static void icl_mbus_init(struct drm_i915_private *dev_priv)
4324 {
4325         u32 val;
4326
4327         val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
4328               MBUS_ABOX_BT_CREDIT_POOL2(16) |
4329               MBUS_ABOX_B_CREDIT(1) |
4330               MBUS_ABOX_BW_CREDIT(1);
4331
4332         I915_WRITE(MBUS_ABOX_CTL, val);
4333 }
4334
4335 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
4336 {
4337         u32 val = I915_READ(LCPLL_CTL);
4338
4339         /*
4340          * The LCPLL register should be turned on by the BIOS. For now
4341          * let's just check its state and print errors in case
4342          * something is wrong.  Don't even try to turn it on.
4343          */
4344
4345         if (val & LCPLL_CD_SOURCE_FCLK)
4346                 DRM_ERROR("CDCLK source is not LCPLL\n");
4347
4348         if (val & LCPLL_PLL_DISABLE)
4349                 DRM_ERROR("LCPLL is disabled\n");
4350
4351         if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
4352                 DRM_ERROR("LCPLL not using non-SSC reference\n");
4353 }
4354
4355 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
4356 {
4357         struct drm_device *dev = &dev_priv->drm;
4358         struct intel_crtc *crtc;
4359
4360         for_each_intel_crtc(dev, crtc)
4361                 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
4362                                 pipe_name(crtc->pipe));
4363
4364         I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
4365                         "Display power well on\n");
4366         I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE,
4367                         "SPLL enabled\n");
4368         I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
4369                         "WRPLL1 enabled\n");
4370         I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
4371                         "WRPLL2 enabled\n");
4372         I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON,
4373                         "Panel power on\n");
4374         I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
4375                         "CPU PWM1 enabled\n");
4376         if (IS_HASWELL(dev_priv))
4377                 I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
4378                                 "CPU PWM2 enabled\n");
4379         I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
4380                         "PCH PWM1 enabled\n");
4381         I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
4382                         "Utility pin enabled\n");
4383         I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE,
4384                         "PCH GTC enabled\n");
4385
4386         /*
4387          * In theory we can still leave IRQs enabled, as long as only the HPD
4388          * interrupts remain enabled. We used to check for that, but since it's
4389          * gen-specific and since we only disable LCPLL after we fully disable
4390          * the interrupts, the check below should be enough.
4391          */
4392         I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
4393 }
4394
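/*
 * D_COMP access differs between platforms: on HSW reads use MMIO but writes
 * must go through the pcode mailbox, while on BDW both use MMIO.
 */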
4395 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
4396 {
4397         if (IS_HASWELL(dev_priv))
4398                 return I915_READ(D_COMP_HSW);
4399         else
4400                 return I915_READ(D_COMP_BDW);
4401 }
4402
4403 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
4404 {
4405         if (IS_HASWELL(dev_priv)) {
4406                 if (sandybridge_pcode_write(dev_priv,
4407                                             GEN6_PCODE_WRITE_D_COMP, val))
4408                         DRM_DEBUG_KMS("Failed to write to D_COMP\n");
4409         } else {
4410                 I915_WRITE(D_COMP_BDW, val);
4411                 POSTING_READ(D_COMP_BDW);
4412         }
4413 }
4414
4415 /*
4416  * This function implements pieces of two sequences from BSpec:
4417  * - Sequence for display software to disable LCPLL
4418  * - Sequence for display software to allow package C8+
4419  * The steps implemented here are just the steps that actually touch the LCPLL
4420  * register. Callers should take care of disabling all the display engine
4421  * functions, doing the mode unset, fixing interrupts, etc.
4422  */
4423 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
4424                               bool switch_to_fclk, bool allow_power_down)
4425 {
4426         u32 val;
4427
4428         assert_can_disable_lcpll(dev_priv);
4429
4430         val = I915_READ(LCPLL_CTL);
4431
4432         if (switch_to_fclk) {
4433                 val |= LCPLL_CD_SOURCE_FCLK;
4434                 I915_WRITE(LCPLL_CTL, val);
4435
4436                 if (wait_for_us(I915_READ(LCPLL_CTL) &
4437                                 LCPLL_CD_SOURCE_FCLK_DONE, 1))
4438                         DRM_ERROR("Switching to FCLK failed\n");
4439
4440                 val = I915_READ(LCPLL_CTL);
4441         }
4442
4443         val |= LCPLL_PLL_DISABLE;
4444         I915_WRITE(LCPLL_CTL, val);
4445         POSTING_READ(LCPLL_CTL);
4446
4447         if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
4448                 DRM_ERROR("LCPLL still locked\n");
4449
4450         val = hsw_read_dcomp(dev_priv);
4451         val |= D_COMP_COMP_DISABLE;
4452         hsw_write_dcomp(dev_priv, val);
4453         ndelay(100);
4454
4455         if (wait_for((hsw_read_dcomp(dev_priv) &
4456                       D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
4457                 DRM_ERROR("D_COMP RCOMP still in progress\n");
4458
4459         if (allow_power_down) {
4460                 val = I915_READ(LCPLL_CTL);
4461                 val |= LCPLL_POWER_DOWN_ALLOW;
4462                 I915_WRITE(LCPLL_CTL, val);
4463                 POSTING_READ(LCPLL_CTL);
4464         }
4465 }
4466
4467 /*
4468  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
4469  * source.
4470  */
4471 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
4472 {
4473         u32 val;
4474
4475         val = I915_READ(LCPLL_CTL);
4476
4477         if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
4478                     LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
4479                 return;
4480
4481         /*
4482          * Make sure we're not in PC8 state before disabling PC8, otherwise
4483          * we'll hang the machine. To prevent PC8 state, just enable force_wake.
4484          */
4485         intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
4486
4487         if (val & LCPLL_POWER_DOWN_ALLOW) {
4488                 val &= ~LCPLL_POWER_DOWN_ALLOW;
4489                 I915_WRITE(LCPLL_CTL, val);
4490                 POSTING_READ(LCPLL_CTL);
4491         }
4492
4493         val = hsw_read_dcomp(dev_priv);
4494         val |= D_COMP_COMP_FORCE;
4495         val &= ~D_COMP_COMP_DISABLE;
4496         hsw_write_dcomp(dev_priv, val);
4497
4498         val = I915_READ(LCPLL_CTL);
4499         val &= ~LCPLL_PLL_DISABLE;
4500         I915_WRITE(LCPLL_CTL, val);
4501
4502         if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
4503                 DRM_ERROR("LCPLL not locked yet\n");
4504
4505         if (val & LCPLL_CD_SOURCE_FCLK) {
4506                 val = I915_READ(LCPLL_CTL);
4507                 val &= ~LCPLL_CD_SOURCE_FCLK;
4508                 I915_WRITE(LCPLL_CTL, val);
4509
4510                 if (wait_for_us((I915_READ(LCPLL_CTL) &
4511                                  LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
4512                         DRM_ERROR("Switching back to LCPLL failed\n");
4513         }
4514
4515         intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
4516
4517         intel_update_cdclk(dev_priv);
4518         intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
4519 }
4520
4521 /*
4522  * Package states C8 and deeper are really deep PC states that can only be
4523  * reached when all the devices on the system allow it, so even if the graphics
4524  * device allows PC8+, it doesn't mean the system will actually get to these
4525  * states. Our driver only allows PC8+ when going into runtime PM.
4526  *
4527  * The requirements for PC8+ are that all the outputs are disabled, the power
4528  * well is disabled and most interrupts are disabled, and these are also
4529  * requirements for runtime PM. When these conditions are met, we manually take
4530  * care of the rest: we disable the interrupts and clocks and switch the LCPLL
4531  * refclk to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can
4532  * hard hang the machine.
4533  *
4534  * When we really reach PC8 or deeper states (not just when we allow it) we lose
4535  * the state of some registers, so when we come back from PC8+ we need to
4536  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
4537  * need to take care of the registers kept by RC6. Notice that this happens even
4538  * if we don't put the device in PCI D3 state (which is what currently happens
4539  * because of the runtime PM support).
4540  *
4541  * For more, read "Display Sequences for Package C8" on the hardware
4542  * documentation.
4543  */
4544 static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
4545 {
4546         u32 val;
4547
4548         DRM_DEBUG_KMS("Enabling package C8+\n");
4549
4550         if (HAS_PCH_LPT_LP(dev_priv)) {
4551                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
4552                 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4553                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
4554         }
4555
4556         lpt_disable_clkout_dp(dev_priv);
4557         hsw_disable_lcpll(dev_priv, true, true);
4558 }
4559
4560 static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
4561 {
4562         u32 val;
4563
4564         DRM_DEBUG_KMS("Disabling package C8+\n");
4565
4566         hsw_restore_lcpll(dev_priv);
4567         intel_init_pch_refclk(dev_priv);
4568
4569         if (HAS_PCH_LPT_LP(dev_priv)) {
4570                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
4571                 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
4572                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
4573         }
4574 }
4575
4576 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
4577                                       bool enable)
4578 {
4579         i915_reg_t reg;
4580         u32 reset_bits, val;
4581
4582         if (IS_IVYBRIDGE(dev_priv)) {
4583                 reg = GEN7_MSG_CTL;
4584                 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
4585         } else {
4586                 reg = HSW_NDE_RSTWRN_OPT;
4587                 reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
4588         }
4589
4590         val = I915_READ(reg);
4591
4592         if (enable)
4593                 val |= reset_bits;
4594         else
4595                 val &= ~reset_bits;
4596
4597         I915_WRITE(reg, val);
4598 }
4599
4600 static void skl_display_core_init(struct drm_i915_private *dev_priv,
4601                                   bool resume)
4602 {
4603         struct i915_power_domains *power_domains = &dev_priv->power_domains;
4604         struct i915_power_well *well;
4605
4606         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4607
4608         /* enable PCH reset handshake */
4609         intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4610
4611         /* enable PG1 and Misc I/O */
4612         mutex_lock(&power_domains->lock);
4613
4614         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4615         intel_power_well_enable(dev_priv, well);
4616
4617         well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
4618         intel_power_well_enable(dev_priv, well);
4619
4620         mutex_unlock(&power_domains->lock);
4621
4622         intel_cdclk_init(dev_priv);
4623
4624         gen9_dbuf_enable(dev_priv);
4625
4626         if (resume && dev_priv->csr.dmc_payload)
4627                 intel_csr_load_program(dev_priv);
4628 }
4629
4630 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
4631 {
4632         struct i915_power_domains *power_domains = &dev_priv->power_domains;
4633         struct i915_power_well *well;
4634
4635         gen9_disable_dc_states(dev_priv);
4636
4637         gen9_dbuf_disable(dev_priv);
4638
4639         intel_cdclk_uninit(dev_priv);
4640
4641         /* The spec doesn't call for removing the reset handshake flag */
4642         /* disable PG1 and Misc I/O */
4643
4644         mutex_lock(&power_domains->lock);
4645
4646         /*
4647          * BSpec says to keep the MISC IO power well enabled here, only
4648          * remove our request for power well 1.
4649          * Note that even though the driver's request is removed, power well 1
4650          * may stay enabled after this due to DMC's own request on it.
4651          */
4652         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4653         intel_power_well_disable(dev_priv, well);
4654
4655         mutex_unlock(&power_domains->lock);
4656
4657         usleep_range(10, 30);           /* 10 us delay per Bspec */
4658 }
4659
4660 static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
4661 {
4662         struct i915_power_domains *power_domains = &dev_priv->power_domains;
4663         struct i915_power_well *well;
4664
4665         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4666
4667         /*
4668          * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
4669          * or else the reset will hang because there is no PCH to respond.
4670          * Move the handshake programming to the initialization sequence;
4671          * previously it was left up to the BIOS.
4672          */
4673         intel_pch_reset_handshake(dev_priv, false);
4674
4675         /* Enable PG1 */
4676         mutex_lock(&power_domains->lock);
4677
4678         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4679         intel_power_well_enable(dev_priv, well);
4680
4681         mutex_unlock(&power_domains->lock);
4682
4683         intel_cdclk_init(dev_priv);
4684
4685         gen9_dbuf_enable(dev_priv);
4686
4687         if (resume && dev_priv->csr.dmc_payload)
4688                 intel_csr_load_program(dev_priv);
4689 }
4690
4691 static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
4692 {
4693         struct i915_power_domains *power_domains = &dev_priv->power_domains;
4694         struct i915_power_well *well;
4695
4696         gen9_disable_dc_states(dev_priv);
4697
4698         gen9_dbuf_disable(dev_priv);
4699
4700         intel_cdclk_uninit(dev_priv);
4701
4702         /* The spec doesn't call for removing the reset handshake flag */
4703
4704         /*
4705          * Disable PW1 (PG1).
4706          * Note that even though the driver's request is removed, power well 1
4707          * may stay enabled after this due to DMC's own request on it.
4708          */
4709         mutex_lock(&power_domains->lock);
4710
4711         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4712         intel_power_well_disable(dev_priv, well);
4713
4714         mutex_unlock(&power_domains->lock);
4715
4716         usleep_range(10, 30);           /* 10 us delay per Bspec */
4717 }
4718
4719 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
4720 {
4721         struct i915_power_domains *power_domains = &dev_priv->power_domains;
4722         struct i915_power_well *well;
4723
4724         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4725
4726         /* 1. Enable PCH Reset Handshake */
4727         intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4728
4729         /* 2-3. */
4730         intel_combo_phy_init(dev_priv);
4731
4732         /*
4733          * 4. Enable Power Well 1 (PG1).
4734          *    The AUX IO power wells will be enabled on demand.
4735          */
4736         mutex_lock(&power_domains->lock);
4737         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4738         intel_power_well_enable(dev_priv, well);
4739         mutex_unlock(&power_domains->lock);
4740
4741         /* 5. Enable CD clock */
4742         intel_cdclk_init(dev_priv);
4743
4744         /* 6. Enable DBUF */
4745         gen9_dbuf_enable(dev_priv);
4746
4747         if (resume && dev_priv->csr.dmc_payload)
4748                 intel_csr_load_program(dev_priv);
4749 }
4750
4751 static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
4752 {
4753         struct i915_power_domains *power_domains = &dev_priv->power_domains;
4754         struct i915_power_well *well;
4755
4756         gen9_disable_dc_states(dev_priv);
4757
4758         /* 1. Disable all display engine functions -> already done */
4759
4760         /* 2. Disable DBUF */
4761         gen9_dbuf_disable(dev_priv);
4762
4763         /* 3. Disable CD clock */
4764         intel_cdclk_uninit(dev_priv);
4765
4766         /*
4767          * 4. Disable Power Well 1 (PG1).
4768          *    The AUX IO power wells are toggled on demand, so they are already
4769          *    disabled at this point.
4770          */
4771         mutex_lock(&power_domains->lock);
4772         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4773         intel_power_well_disable(dev_priv, well);
4774         mutex_unlock(&power_domains->lock);
4775
4776         usleep_range(10, 30);           /* 10 us delay per Bspec */
4777
4778         /* 5. */
4779         intel_combo_phy_uninit(dev_priv);
4780 }
4781
4782 static void icl_display_core_init(struct drm_i915_private *dev_priv,
4783                                   bool resume)
4784 {
4785         struct i915_power_domains *power_domains = &dev_priv->power_domains;
4786         struct i915_power_well *well;
4787
4788         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4789
4790         /* 1. Enable PCH reset handshake. */
4791         intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4792
4793         /* 2. Initialize all combo phys */
4794         intel_combo_phy_init(dev_priv);
4795
4796         /*
4797          * 3. Enable Power Well 1 (PG1).
4798          *    The AUX IO power wells will be enabled on demand.
4799          */
4800         mutex_lock(&power_domains->lock);
4801         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4802         intel_power_well_enable(dev_priv, well);
4803         mutex_unlock(&power_domains->lock);
4804
4805         /* 4. Enable CDCLK. */
4806         intel_cdclk_init(dev_priv);
4807
4808         /* 5. Enable DBUF. */
4809         icl_dbuf_enable(dev_priv);
4810
4811         /* 6. Setup MBUS. */
4812         icl_mbus_init(dev_priv);
4813
4814         if (resume && dev_priv->csr.dmc_payload)
4815                 intel_csr_load_program(dev_priv);
4816 }
4817
4818 static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
4819 {
4820         struct i915_power_domains *power_domains = &dev_priv->power_domains;
4821         struct i915_power_well *well;
4822
4823         gen9_disable_dc_states(dev_priv);
4824
4825         /* 1. Disable all display engine functions -> already done */
4826
4827         /* 2. Disable DBUF */
4828         icl_dbuf_disable(dev_priv);
4829
4830         /* 3. Disable CD clock */
4831         intel_cdclk_uninit(dev_priv);
4832
4833         /*
4834          * 4. Disable Power Well 1 (PG1).
4835          *    The AUX IO power wells are toggled on demand, so they are already
4836          *    disabled at this point.
4837          */
4838         mutex_lock(&power_domains->lock);
4839         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4840         intel_power_well_disable(dev_priv, well);
4841         mutex_unlock(&power_domains->lock);
4842
4843         /* 5. */
4844         intel_combo_phy_uninit(dev_priv);
4845 }
4846
4847 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
4848 {
4849         struct i915_power_well *cmn_bc =
4850                 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
4851         struct i915_power_well *cmn_d =
4852                 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
4853
4854         /*
4855          * DISPLAY_PHY_CONTROL can get corrupted if read. As a
4856          * workaround never ever read DISPLAY_PHY_CONTROL, and
4857          * instead maintain a shadow copy ourselves. Use the actual
4858          * power well state and lane status to reconstruct the
4859          * expected initial value.
4860          */
4861         dev_priv->chv_phy_control =
4862                 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
4863                 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
4864                 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
4865                 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
4866                 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
4867
4868         /*
4869          * If all lanes are disabled we leave the override disabled
4870          * with all power down bits cleared to match the state we
4871          * would use after disabling the port. Otherwise enable the
4872          * override and set the lane powerdown bits according to the
4873          * current lane status.
4874          */
4875         if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
4876                 u32 status = I915_READ(DPLL(PIPE_A));
4877                 unsigned int mask;
4878
4879                 mask = status & DPLL_PORTB_READY_MASK;
4880                 if (mask == 0xf)
4881                         mask = 0x0;
4882                 else
4883                         dev_priv->chv_phy_control |=
4884                                 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
4885
4886                 dev_priv->chv_phy_control |=
4887                         PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
4888
4889                 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
4890                 if (mask == 0xf)
4891                         mask = 0x0;
4892                 else
4893                         dev_priv->chv_phy_control |=
4894                                 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
4895
4896                 dev_priv->chv_phy_control |=
4897                         PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
4898
4899                 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
4900
4901                 dev_priv->chv_phy_assert[DPIO_PHY0] = false;
4902         } else {
4903                 dev_priv->chv_phy_assert[DPIO_PHY0] = true;
4904         }
4905
4906         if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
4907                 u32 status = I915_READ(DPIO_PHY_STATUS);
4908                 unsigned int mask;
4909
4910                 mask = status & DPLL_PORTD_READY_MASK;
4911
4912                 if (mask == 0xf)
4913                         mask = 0x0;
4914                 else
4915                         dev_priv->chv_phy_control |=
4916                                 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
4917
4918                 dev_priv->chv_phy_control |=
4919                         PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
4920
4921                 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
4922
4923                 dev_priv->chv_phy_assert[DPIO_PHY1] = false;
4924         } else {
4925                 dev_priv->chv_phy_assert[DPIO_PHY1] = true;
4926         }
4927
4928         I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
4929
4930         DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
4931                       dev_priv->chv_phy_control);
4932 }
4933
4934 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
4935 {
4936         struct i915_power_well *cmn =
4937                 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
4938         struct i915_power_well *disp2d =
4939                 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
4940
4941         /* If the display might already be active, skip this */
4942         if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
4943             disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
4944             I915_READ(DPIO_CTL) & DPIO_CMNRST)
4945                 return;
4946
4947         DRM_DEBUG_KMS("toggling display PHY side reset\n");
4948
4949         /* cmnlane needs DPLL registers */
4950         disp2d->desc->ops->enable(dev_priv, disp2d);
4951
4952         /*
4953          * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
4954          * Need to assert and de-assert PHY SB reset by gating the
4955          * common lane power, then un-gating it.
4956          * Simply ungating isn't enough to reset the PHY enough to get
4957          * ports and lanes running.
4958          */
4959         cmn->desc->ops->disable(dev_priv, cmn);
4960 }
4961
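/*
 * Ask the Punit whether the power island behind @reg0 is fully power gated.
 */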
4962 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
4963 {
4964         bool ret;
4965
4966         vlv_punit_get(dev_priv);
4967         ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
4968         vlv_punit_put(dev_priv);
4969
4970         return ret;
4971 }
4972
4973 static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
4974 {
4975         WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
4976              "VED not power gated\n");
4977 }
4978
4979 static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
4980 {
4981         static const struct pci_device_id isp_ids[] = {
4982                 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
4983                 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
4984                 {}
4985         };
4986
4987         WARN(!pci_dev_present(isp_ids) &&
4988              !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
4989              "ISP not power gated\n");
4990 }
4991
4992 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
4993
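/*
 * Rough sketch of how the entry points below are expected to pair up over the
 * driver's lifetime, derived from the kernel-doc pairing rules rather than
 * from any particular caller:
 *
 *   intel_power_domains_init(i915);
 *   intel_power_domains_init_hw(i915, false);
 *   ... display HW readout ...
 *   intel_power_domains_enable(i915);
 *   ...
 *   intel_power_domains_disable(i915);
 *   intel_power_domains_driver_remove(i915);
 *   intel_power_domains_cleanup(i915);
 */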
4994 /**
4995  * intel_power_domains_init_hw - initialize hardware power domain state
4996  * @i915: i915 device instance
4997  * @resume: Called from resume code paths or not
4998  *
4999  * This function initializes the hardware power domain state and enables all
5000  * power wells belonging to the INIT power domain. Power wells in other
5001  * domains (and not in the INIT domain) are referenced or disabled by
5002  * intel_modeset_readout_hw_state(). After that the reference count of each
5003  * power well must match its HW enabled state, see
5004  * intel_power_domains_verify_state().
5005  *
5006  * It will return with power domains disabled (to be enabled later by
5007  * intel_power_domains_enable()) and must be paired with
5008  * intel_power_domains_driver_remove().
5009  */
5010 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
5011 {
5012         struct i915_power_domains *power_domains = &i915->power_domains;
5013
5014         power_domains->initializing = true;
5015
5016         if (INTEL_GEN(i915) >= 11) {
5017                 icl_display_core_init(i915, resume);
5018         } else if (IS_CANNONLAKE(i915)) {
5019                 cnl_display_core_init(i915, resume);
5020         } else if (IS_GEN9_BC(i915)) {
5021                 skl_display_core_init(i915, resume);
5022         } else if (IS_GEN9_LP(i915)) {
5023                 bxt_display_core_init(i915, resume);
5024         } else if (IS_CHERRYVIEW(i915)) {
5025                 mutex_lock(&power_domains->lock);
5026                 chv_phy_control_init(i915);
5027                 mutex_unlock(&power_domains->lock);
5028                 assert_isp_power_gated(i915);
5029         } else if (IS_VALLEYVIEW(i915)) {
5030                 mutex_lock(&power_domains->lock);
5031                 vlv_cmnlane_wa(i915);
5032                 mutex_unlock(&power_domains->lock);
5033                 assert_ved_power_gated(i915);
5034                 assert_isp_power_gated(i915);
5035         } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
5036                 hsw_assert_cdclk(i915);
5037                 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5038         } else if (IS_IVYBRIDGE(i915)) {
5039                 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5040         }
5041
5042         /*
5043          * Keep all power wells enabled for any dependent HW access during
5044          * initialization and to make sure we keep BIOS enabled display HW
5045          * resources powered until display HW readout is complete. We drop
5046          * this reference in intel_power_domains_enable().
5047          */
5048         power_domains->wakeref =
5049                 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5050
5051         /* Disable power well support if the user asked for it. */
5052         if (!i915_modparams.disable_power_well)
5053                 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5054         intel_power_domains_sync_hw(i915);
5055
5056         power_domains->initializing = false;
5057 }
5058
5059 /**
5060  * intel_power_domains_driver_remove - deinitialize hw power domain state
5061  * @i915: i915 device instance
5062  *
5063  * De-initializes the display power domain HW state. It also ensures that the
5064  * device stays powered up so that the driver can be reloaded.
5065  *
5066  * It must be called with power domains already disabled (after a call to
5067  * intel_power_domains_disable()) and must be paired with
5068  * intel_power_domains_init_hw().
5069  */
5070 void intel_power_domains_driver_remove(struct drm_i915_private *i915)
5071 {
5072         intel_wakeref_t wakeref __maybe_unused =
5073                 fetch_and_zero(&i915->power_domains.wakeref);
5074
5075         /* Remove the refcount we took to keep power well support disabled. */
5076         if (!i915_modparams.disable_power_well)
5077                 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
5078
5079         intel_display_power_flush_work_sync(i915);
5080
5081         intel_power_domains_verify_state(i915);
5082
5083         /* Keep the power well enabled, but cancel its rpm wakeref. */
5084         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
5085 }
5086
5087 /**
5088  * intel_power_domains_enable - enable toggling of display power wells
5089  * @i915: i915 device instance
5090  *
5091  * Enable the on-demand enabling/disabling of the display power wells. Note that
5092  * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
5093  * only at specific points of the display modeset sequence, thus they are not
5094  * affected by the intel_power_domains_enable()/disable() calls. The purpose
5095  * of these functions is to keep the rest of the power wells enabled until the end
5096  * of display HW readout (which will acquire the power references reflecting
5097  * the current HW state).
5098  */
5099 void intel_power_domains_enable(struct drm_i915_private *i915)
5100 {
5101         intel_wakeref_t wakeref __maybe_unused =
5102                 fetch_and_zero(&i915->power_domains.wakeref);
5103
5104         intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5105         intel_power_domains_verify_state(i915);
5106 }
5107
5108 /**
5109  * intel_power_domains_disable - disable toggling of display power wells
5110  * @i915: i915 device instance
5111  *
5112  * Disable the on-demand enabling/disabling of the display power wells. See
5113  * intel_power_domains_enable() for which power wells this call controls.
5114  */
5115 void intel_power_domains_disable(struct drm_i915_private *i915)
5116 {
5117         struct i915_power_domains *power_domains = &i915->power_domains;
5118
5119         WARN_ON(power_domains->wakeref);
5120         power_domains->wakeref =
5121                 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5122
5123         intel_power_domains_verify_state(i915);
5124 }
5125
5126 /**
5127  * intel_power_domains_suspend - suspend power domain state
5128  * @i915: i915 device instance
5129  * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
5130  *
5131  * This function prepares the hardware power domain state before entering
5132  * system suspend.
5133  *
5134  * It must be called with power domains already disabled (after a call to
5135  * intel_power_domains_disable()) and paired with intel_power_domains_resume().
5136  */
5137 void intel_power_domains_suspend(struct drm_i915_private *i915,
5138                                  enum i915_drm_suspend_mode suspend_mode)
5139 {
5140         struct i915_power_domains *power_domains = &i915->power_domains;
5141         intel_wakeref_t wakeref __maybe_unused =
5142                 fetch_and_zero(&power_domains->wakeref);
5143
5144         intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5145
5146         /*
5147          * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
5148  * support, don't manually deinit the power domains. This also means the
5149          * CSR/DMC firmware will stay active, it will power down any HW
5150          * resources as required and also enable deeper system power states
5151          * that would be blocked if the firmware was inactive.
5152          */
5153         if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
5154             suspend_mode == I915_DRM_SUSPEND_IDLE &&
5155             i915->csr.dmc_payload) {
5156                 intel_display_power_flush_work(i915);
5157                 intel_power_domains_verify_state(i915);
5158                 return;
5159         }
5160
5161         /*
5162          * Even if power well support was disabled we still want to disable
5163          * power wells if power domains must be deinitialized for suspend.
5164          */
5165         if (!i915_modparams.disable_power_well)
5166                 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
5167
5168         intel_display_power_flush_work(i915);
5169         intel_power_domains_verify_state(i915);
5170
5171         if (INTEL_GEN(i915) >= 11)
5172                 icl_display_core_uninit(i915);
5173         else if (IS_CANNONLAKE(i915))
5174                 cnl_display_core_uninit(i915);
5175         else if (IS_GEN9_BC(i915))
5176                 skl_display_core_uninit(i915);
5177         else if (IS_GEN9_LP(i915))
5178                 bxt_display_core_uninit(i915);
5179
5180         power_domains->display_core_suspended = true;
5181 }
5182
5183 /**
5184  * intel_power_domains_resume - resume power domain state
5185  * @i915: i915 device instance
5186  *
5187  * This function resumes the hardware power domain state during system resume.
5188  *
5189  * It will return with power domain support disabled (to be enabled later by
5190  * intel_power_domains_enable()) and must be paired with
5191  * intel_power_domains_suspend().
5192  */
5193 void intel_power_domains_resume(struct drm_i915_private *i915)
5194 {
5195         struct i915_power_domains *power_domains = &i915->power_domains;
5196
5197         if (power_domains->display_core_suspended) {
5198                 intel_power_domains_init_hw(i915, true);
5199                 power_domains->display_core_suspended = false;
5200         } else {
5201                 WARN_ON(power_domains->wakeref);
5202                 power_domains->wakeref =
5203                         intel_display_power_get(i915, POWER_DOMAIN_INIT);
5204         }
5205
5206         intel_power_domains_verify_state(i915);
5207 }
5208
5209 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
5210
5211 static void intel_power_domains_dump_info(struct drm_i915_private *i915)
5212 {
5213         struct i915_power_domains *power_domains = &i915->power_domains;
5214         struct i915_power_well *power_well;
5215
5216         for_each_power_well(i915, power_well) {
5217                 enum intel_display_power_domain domain;
5218
5219                 DRM_DEBUG_DRIVER("%-25s %d\n",
5220                                  power_well->desc->name, power_well->count);
5221
5222                 for_each_power_domain(domain, power_well->desc->domains)
5223                         DRM_DEBUG_DRIVER("  %-23s %d\n",
5224                                          intel_display_power_domain_str(domain),
5225                                          power_domains->domain_use_count[domain]);
5226         }
5227 }
5228
5229 /**
5230  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
5231  * @i915: i915 device instance
5232  *
5233  * Verify that the reference count of each power well matches its HW enabled
5234  * state and the total refcount of the domains it belongs to. This must be
5235  * called after modeset HW state sanitization, which is responsible for
5236  * acquiring reference counts for any power wells in use and disabling the
5237  * ones left on by BIOS but not required by any active output.
5238  */
5239 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
5240 {
5241         struct i915_power_domains *power_domains = &i915->power_domains;
5242         struct i915_power_well *power_well;
5243         bool dump_domain_info;
5244
5245         mutex_lock(&power_domains->lock);
5246
5247         verify_async_put_domains_state(power_domains);
5248
5249         dump_domain_info = false;
5250         for_each_power_well(i915, power_well) {
5251                 enum intel_display_power_domain domain;
5252                 int domains_count;
5253                 bool enabled;
5254
5255                 enabled = power_well->desc->ops->is_enabled(i915, power_well);
5256                 if ((power_well->count || power_well->desc->always_on) !=
5257                     enabled)
5258                         DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)\n",
5259                                   power_well->desc->name,
5260                                   power_well->count, enabled);
5261
5262                 domains_count = 0;
5263                 for_each_power_domain(domain, power_well->desc->domains)
5264                         domains_count += power_domains->domain_use_count[domain];
5265
5266                 if (power_well->count != domains_count) {
5267                         DRM_ERROR("power well %s refcount/domain refcount mismatch "
5268                                   "(refcount %d/domains refcount %d)\n",
5269                                   power_well->desc->name, power_well->count,
5270                                   domains_count);
5271                         dump_domain_info = true;
5272                 }
5273         }
5274
5275         if (dump_domain_info) {
5276                 static bool dumped;
5277
5278                 if (!dumped) {
5279                         intel_power_domains_dump_info(i915);
5280                         dumped = true;
5281                 }
5282         }
5283
5284         mutex_unlock(&power_domains->lock);
5285 }
5286
5287 #else
5288
5289 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
5290 {
5291 }
5292
5293 #endif
5294
5295 void intel_display_power_suspend_late(struct drm_i915_private *i915)
5296 {
5297         if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915))
5298                 bxt_enable_dc9(i915);
5299         else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
5300                 hsw_enable_pc8(i915);
5301 }
5302
5303 void intel_display_power_resume_early(struct drm_i915_private *i915)
5304 {
5305         if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
5306                 gen9_sanitize_dc_state(i915);
5307                 bxt_disable_dc9(i915);
5308         } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5309                 hsw_disable_pc8(i915);
5310         }
5311 }
5312
5313 void intel_display_power_suspend(struct drm_i915_private *i915)
5314 {
5315         if (INTEL_GEN(i915) >= 11) {
5316                 icl_display_core_uninit(i915);
5317                 bxt_enable_dc9(i915);
5318         } else if (IS_GEN9_LP(i915)) {
5319                 bxt_display_core_uninit(i915);
5320                 bxt_enable_dc9(i915);
5321         } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5322                 hsw_enable_pc8(i915);
5323         }
5324 }
5325
5326 void intel_display_power_resume(struct drm_i915_private *i915)
5327 {
5328         if (INTEL_GEN(i915) >= 11) {
5329                 bxt_disable_dc9(i915);
5330                 icl_display_core_init(i915, true);
5331                 if (i915->csr.dmc_payload) {
5332                         if (i915->csr.allowed_dc_mask &
5333                             DC_STATE_EN_UPTO_DC6)
5334                                 skl_enable_dc6(i915);
5335                         else if (i915->csr.allowed_dc_mask &
5336                                  DC_STATE_EN_UPTO_DC5)
5337                                 gen9_enable_dc5(i915);
5338                 }
5339         } else if (IS_GEN9_LP(i915)) {
5340                 bxt_disable_dc9(i915);
5341                 bxt_display_core_init(i915, true);
5342                 if (i915->csr.dmc_payload &&
5343                     (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
5344                         gen9_enable_dc5(i915);
5345         } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5346                 hsw_disable_pc8(i915);
5347         }
5348 }