drm/i915: Use single set of AUX powerwell ops for gen11+
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / display / intel_display_power.c
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5
6 #include "display/intel_crt.h"
7 #include "display/intel_dp.h"
8
9 #include "i915_drv.h"
10 #include "i915_irq.h"
11 #include "intel_cdclk.h"
12 #include "intel_combo_phy.h"
13 #include "intel_csr.h"
14 #include "intel_display_power.h"
15 #include "intel_display_types.h"
16 #include "intel_dpio_phy.h"
17 #include "intel_hotplug.h"
18 #include "intel_pm.h"
19 #include "intel_sideband.h"
20 #include "intel_tc.h"
21 #include "intel_vga.h"
22
23 static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops;
24
25 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
26                                          enum i915_power_well_id power_well_id);
27
/**
 * intel_display_power_domain_str - map a power domain to a human readable name
 * @domain: the power domain to name
 *
 * Returns a constant string identifying @domain, used in debug/state dumps.
 * An unknown value triggers MISSING_CASE() and returns "?".
 */
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
	switch (domain) {
	case POWER_DOMAIN_DISPLAY_CORE:
		return "DISPLAY_CORE";
	case POWER_DOMAIN_PIPE_A:
		return "PIPE_A";
	case POWER_DOMAIN_PIPE_B:
		return "PIPE_B";
	case POWER_DOMAIN_PIPE_C:
		return "PIPE_C";
	case POWER_DOMAIN_PIPE_D:
		return "PIPE_D";
	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
		return "PIPE_A_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
		return "PIPE_B_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
		return "PIPE_C_PANEL_FITTER";
	case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
		return "PIPE_D_PANEL_FITTER";
	case POWER_DOMAIN_TRANSCODER_A:
		return "TRANSCODER_A";
	case POWER_DOMAIN_TRANSCODER_B:
		return "TRANSCODER_B";
	case POWER_DOMAIN_TRANSCODER_C:
		return "TRANSCODER_C";
	case POWER_DOMAIN_TRANSCODER_D:
		return "TRANSCODER_D";
	case POWER_DOMAIN_TRANSCODER_EDP:
		return "TRANSCODER_EDP";
	case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
		return "TRANSCODER_VDSC_PW2";
	case POWER_DOMAIN_TRANSCODER_DSI_A:
		return "TRANSCODER_DSI_A";
	case POWER_DOMAIN_TRANSCODER_DSI_C:
		return "TRANSCODER_DSI_C";
	case POWER_DOMAIN_PORT_DDI_A_LANES:
		return "PORT_DDI_A_LANES";
	case POWER_DOMAIN_PORT_DDI_B_LANES:
		return "PORT_DDI_B_LANES";
	case POWER_DOMAIN_PORT_DDI_C_LANES:
		return "PORT_DDI_C_LANES";
	case POWER_DOMAIN_PORT_DDI_D_LANES:
		return "PORT_DDI_D_LANES";
	case POWER_DOMAIN_PORT_DDI_E_LANES:
		return "PORT_DDI_E_LANES";
	case POWER_DOMAIN_PORT_DDI_F_LANES:
		return "PORT_DDI_F_LANES";
	case POWER_DOMAIN_PORT_DDI_G_LANES:
		return "PORT_DDI_G_LANES";
	case POWER_DOMAIN_PORT_DDI_H_LANES:
		return "PORT_DDI_H_LANES";
	case POWER_DOMAIN_PORT_DDI_I_LANES:
		return "PORT_DDI_I_LANES";
	case POWER_DOMAIN_PORT_DDI_A_IO:
		return "PORT_DDI_A_IO";
	case POWER_DOMAIN_PORT_DDI_B_IO:
		return "PORT_DDI_B_IO";
	case POWER_DOMAIN_PORT_DDI_C_IO:
		return "PORT_DDI_C_IO";
	case POWER_DOMAIN_PORT_DDI_D_IO:
		return "PORT_DDI_D_IO";
	case POWER_DOMAIN_PORT_DDI_E_IO:
		return "PORT_DDI_E_IO";
	case POWER_DOMAIN_PORT_DDI_F_IO:
		return "PORT_DDI_F_IO";
	case POWER_DOMAIN_PORT_DDI_G_IO:
		return "PORT_DDI_G_IO";
	case POWER_DOMAIN_PORT_DDI_H_IO:
		return "PORT_DDI_H_IO";
	case POWER_DOMAIN_PORT_DDI_I_IO:
		return "PORT_DDI_I_IO";
	case POWER_DOMAIN_PORT_DSI:
		return "PORT_DSI";
	case POWER_DOMAIN_PORT_CRT:
		return "PORT_CRT";
	case POWER_DOMAIN_PORT_OTHER:
		return "PORT_OTHER";
	case POWER_DOMAIN_VGA:
		return "VGA";
	case POWER_DOMAIN_AUDIO:
		return "AUDIO";
	case POWER_DOMAIN_AUX_A:
		return "AUX_A";
	case POWER_DOMAIN_AUX_B:
		return "AUX_B";
	case POWER_DOMAIN_AUX_C:
		return "AUX_C";
	case POWER_DOMAIN_AUX_D:
		return "AUX_D";
	case POWER_DOMAIN_AUX_E:
		return "AUX_E";
	case POWER_DOMAIN_AUX_F:
		return "AUX_F";
	case POWER_DOMAIN_AUX_G:
		return "AUX_G";
	case POWER_DOMAIN_AUX_H:
		return "AUX_H";
	case POWER_DOMAIN_AUX_I:
		return "AUX_I";
	case POWER_DOMAIN_AUX_IO_A:
		return "AUX_IO_A";
	case POWER_DOMAIN_AUX_C_TBT:
		return "AUX_C_TBT";
	case POWER_DOMAIN_AUX_D_TBT:
		return "AUX_D_TBT";
	case POWER_DOMAIN_AUX_E_TBT:
		return "AUX_E_TBT";
	case POWER_DOMAIN_AUX_F_TBT:
		return "AUX_F_TBT";
	case POWER_DOMAIN_AUX_G_TBT:
		return "AUX_G_TBT";
	case POWER_DOMAIN_AUX_H_TBT:
		return "AUX_H_TBT";
	case POWER_DOMAIN_AUX_I_TBT:
		return "AUX_I_TBT";
	case POWER_DOMAIN_GMBUS:
		return "GMBUS";
	case POWER_DOMAIN_INIT:
		return "INIT";
	case POWER_DOMAIN_MODESET:
		return "MODESET";
	case POWER_DOMAIN_GT_IRQ:
		return "GT_IRQ";
	case POWER_DOMAIN_DPLL_DC_OFF:
		return "DPLL_DC_OFF";
	case POWER_DOMAIN_TC_COLD_OFF:
		return "TC_COLD_OFF";
	default:
		MISSING_CASE(domain);
		return "?";
	}
}
163
164 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
165                                     struct i915_power_well *power_well)
166 {
167         drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
168         power_well->desc->ops->enable(dev_priv, power_well);
169         power_well->hw_enabled = true;
170 }
171
172 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
173                                      struct i915_power_well *power_well)
174 {
175         drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
176         power_well->hw_enabled = false;
177         power_well->desc->ops->disable(dev_priv, power_well);
178 }
179
180 static void intel_power_well_get(struct drm_i915_private *dev_priv,
181                                  struct i915_power_well *power_well)
182 {
183         if (!power_well->count++)
184                 intel_power_well_enable(dev_priv, power_well);
185 }
186
187 static void intel_power_well_put(struct drm_i915_private *dev_priv,
188                                  struct i915_power_well *power_well)
189 {
190         drm_WARN(&dev_priv->drm, !power_well->count,
191                  "Use count on power well %s is already zero",
192                  power_well->desc->name);
193
194         if (!--power_well->count)
195                 intel_power_well_disable(dev_priv, power_well);
196 }
197
198 /**
199  * __intel_display_power_is_enabled - unlocked check for a power domain
200  * @dev_priv: i915 device instance
201  * @domain: power domain to check
202  *
203  * This is the unlocked version of intel_display_power_is_enabled() and should
204  * only be used from error capture and recovery code where deadlocks are
205  * possible.
206  *
207  * Returns:
208  * True when the power domain is enabled, false otherwise.
209  */
210 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
211                                       enum intel_display_power_domain domain)
212 {
213         struct i915_power_well *power_well;
214         bool is_enabled;
215
216         if (dev_priv->runtime_pm.suspended)
217                 return false;
218
219         is_enabled = true;
220
221         for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
222                 if (power_well->desc->always_on)
223                         continue;
224
225                 if (!power_well->hw_enabled) {
226                         is_enabled = false;
227                         break;
228                 }
229         }
230
231         return is_enabled;
232 }
233
234 /**
235  * intel_display_power_is_enabled - check for a power domain
236  * @dev_priv: i915 device instance
237  * @domain: power domain to check
238  *
239  * This function can be used to check the hw power domain state. It is mostly
240  * used in hardware state readout functions. Everywhere else code should rely
241  * upon explicit power domain reference counting to ensure that the hardware
242  * block is powered up before accessing it.
243  *
244  * Callers must hold the relevant modesetting locks to ensure that concurrent
245  * threads can't disable the power well while the caller tries to read a few
246  * registers.
247  *
248  * Returns:
249  * True when the power domain is enabled, false otherwise.
250  */
251 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
252                                     enum intel_display_power_domain domain)
253 {
254         struct i915_power_domains *power_domains;
255         bool ret;
256
257         power_domains = &dev_priv->power_domains;
258
259         mutex_lock(&power_domains->lock);
260         ret = __intel_display_power_is_enabled(dev_priv, domain);
261         mutex_unlock(&power_domains->lock);
262
263         return ret;
264 }
265
266 /*
267  * Starting with Haswell, we have a "Power Down Well" that can be turned off
268  * when not needed anymore. We have 4 registers that can request the power well
269  * to be enabled, and it will only be disabled if none of the registers is
270  * requesting it to be enabled.
271  */
/*
 * Re-initialize state that is lost while the well is off: reset the VGA
 * I/O/memory decode if the well feeds the VGA plane, then re-enable the
 * interrupt registers for the pipes powered by this well.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask, bool has_vga)
{
	if (has_vga)
		intel_vga_reset_io_mem(dev_priv);

	if (irq_pipe_mask)
		gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}
281
/*
 * Quiesce the interrupt registers of the pipes powered by this well before
 * the well itself is turned off.
 */
static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
				       u8 irq_pipe_mask)
{
	if (irq_pipe_mask)
		gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}
288
289 #define ICL_AUX_PW_TO_CH(pw_idx)        \
290         ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
291
292 #define ICL_TBT_AUX_PW_TO_CH(pw_idx)    \
293         ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
294
295 static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
296                                      struct i915_power_well *power_well)
297 {
298         int pw_idx = power_well->desc->hsw.idx;
299
300         return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
301                                                  ICL_AUX_PW_TO_CH(pw_idx);
302 }
303
304 static struct intel_digital_port *
305 aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
306                        enum aux_ch aux_ch)
307 {
308         struct intel_digital_port *dig_port = NULL;
309         struct intel_encoder *encoder;
310
311         for_each_intel_encoder(&dev_priv->drm, encoder) {
312                 /* We'll check the MST primary port */
313                 if (encoder->type == INTEL_OUTPUT_DP_MST)
314                         continue;
315
316                 dig_port = enc_to_dig_port(encoder);
317                 if (!dig_port)
318                         continue;
319
320                 if (dig_port->aux_ch != aux_ch) {
321                         dig_port = NULL;
322                         continue;
323                 }
324
325                 break;
326         }
327
328         return dig_port;
329 }
330
331 static bool tc_phy_aux_timeout_expected(struct drm_i915_private *dev_priv,
332                                         struct i915_power_well *power_well)
333 {
334         /* An AUX timeout is expected if the TBT DP tunnel is down. */
335         if (power_well->desc->hsw.is_tc_tbt)
336                 return true;
337
338         /*
339          * An AUX timeout is expected because we enable TC legacy port aux
340          * to hold port out of TC cold
341          */
342         if (INTEL_GEN(dev_priv) == 11 &&
343             power_well->desc->ops == &icl_tc_phy_aux_power_well_ops) {
344                 enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
345                 struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
346
347                 return dig_port->tc_legacy_port;
348         }
349
350         return false;
351 }
352
/*
 * Wait for the power well's state bit to report "enabled" after the driver
 * request bit was set. On timeout only WARN if the timeout isn't an
 * expected TC/TBT condition, see tc_phy_aux_timeout_expected().
 */
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;

	/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
	if (intel_de_wait_for_set(dev_priv, regs->driver,
				  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
		drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
			    power_well->desc->name);

		drm_WARN_ON(&dev_priv->drm,
			    !tc_phy_aux_timeout_expected(dev_priv, power_well));

	}
}
370
371 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
372                                      const struct i915_power_well_regs *regs,
373                                      int pw_idx)
374 {
375         u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
376         u32 ret;
377
378         ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
379         ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
380         if (regs->kvmr.reg)
381                 ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
382         ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
383
384         return ret;
385 }
386
/*
 * Wait (briefly) for a power well's state bit to clear after the driver
 * request was dropped, bailing out early if some other agent still
 * requests the well.
 */
static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	bool disabled;
	u32 reqs;

	/*
	 * Bspec doesn't require waiting for PWs to get disabled, but still do
	 * this for paranoia. The known cases where a PW will be forced on:
	 * - a KVMR request on any power well via the KVMR request register
	 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
	 *   DEBUG request registers
	 * Skip the wait in case any of the request bits are set and print a
	 * diagnostic message.
	 */
	/* NB: the condition intentionally updates 'disabled'/'reqs' on each poll. */
	wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
			       HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
		 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
	if (disabled)
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
		    power_well->desc->name,
		    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
415
/* Wait for the fuse value distribution of power gate @pg to complete. */
static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
					   enum skl_power_gate pg)
{
	/* Timeout 5us for PG#0, for other PGs 1us */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
					  SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
424
425 static void hsw_power_well_enable_prepare(struct drm_i915_private *dev_priv,
426                                           struct i915_power_well *power_well)
427 {
428         const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
429         int pw_idx = power_well->desc->hsw.idx;
430         u32 val;
431
432         if (power_well->desc->hsw.has_fuses) {
433                 enum skl_power_gate pg;
434
435                 pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
436                                                  SKL_PW_CTL_IDX_TO_PG(pw_idx);
437                 /*
438                  * For PW1 we have to wait both for the PW0/PG0 fuse state
439                  * before enabling the power well and PW1/PG1's own fuse
440                  * state after the enabling. For all other power wells with
441                  * fuses we only have to wait for that PW/PG's fuse state
442                  * after the enabling.
443                  */
444                 if (pg == SKL_PG1)
445                         gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
446         }
447
448         val = intel_de_read(dev_priv, regs->driver);
449         intel_de_write(dev_priv, regs->driver,
450                        val | HSW_PWR_WELL_CTL_REQ(pw_idx));
451 }
452
/*
 * Second half of the HSW+ power well enable sequence: wait for the state
 * bit, apply the CNL AUX override workaround, wait for this well's power
 * gate fuse distribution and restore pipe IRQ/VGA state.
 */
static void hsw_power_well_enable_complete(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->hsw.idx;

	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: cnl */
	if (IS_CANNONLAKE(dev_priv) &&
	    pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
	    pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
		u32 val;

		val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
		val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
		intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
	}

	/* Wait for this PW/PG's own fuse state, see ..._enable_prepare(). */
	if (power_well->desc->hsw.has_fuses) {
		enum skl_power_gate pg;

		pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
						 SKL_PW_CTL_IDX_TO_PG(pw_idx);
		gen9_wait_for_power_well_fuses(dev_priv, pg);
	}

	hsw_power_well_post_enable(dev_priv,
				   power_well->desc->hsw.irq_pipe_mask,
				   power_well->desc->hsw.has_vga);
}
483
/*
 * Generic HSW+ power well enable: set the request bit, then wait for the
 * enable to complete (state bit, fuses, WAs, pipe IRQ/VGA re-init).
 */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_power_well_enable_prepare(dev_priv, power_well);
	hsw_power_well_enable_complete(dev_priv, power_well);
}
490
491 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
492                                    struct i915_power_well *power_well)
493 {
494         const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
495         int pw_idx = power_well->desc->hsw.idx;
496         u32 val;
497
498         hsw_power_well_pre_disable(dev_priv,
499                                    power_well->desc->hsw.irq_pipe_mask);
500
501         val = intel_de_read(dev_priv, regs->driver);
502         intel_de_write(dev_priv, regs->driver,
503                        val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
504         hsw_wait_for_power_well_disable(dev_priv, power_well);
505 }
506
507 #define ICL_AUX_PW_TO_PHY(pw_idx)       ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
508
/*
 * ICL combo PHY AUX well enable: set the driver request bit, enable the
 * AUX lanes in the PHY (gen11 only, per the guard below), wait for the
 * well and apply Display WA #1178 where needed.
 */
static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
	u32 val;

	/* These ops are only expected to be used on ICL. */
	drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

	val = intel_de_read(dev_priv, regs->driver);
	intel_de_write(dev_priv, regs->driver,
		       val | HSW_PWR_WELL_CTL_REQ(pw_idx));

	if (INTEL_GEN(dev_priv) < 12) {
		val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
		intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
			       val | ICL_LANE_ENABLE_AUX);
	}

	hsw_wait_for_power_well_enable(dev_priv, power_well);

	/* Display WA #1178: icl */
	if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
	    !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
		val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
		val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
		intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
	}
}
540
541 static void
542 icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
543                                      struct i915_power_well *power_well)
544 {
545         const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
546         int pw_idx = power_well->desc->hsw.idx;
547         enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
548         u32 val;
549
550         drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
551
552         val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
553         intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
554                        val & ~ICL_LANE_ENABLE_AUX);
555
556         val = intel_de_read(dev_priv, regs->driver);
557         intel_de_write(dev_priv, regs->driver,
558                        val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
559
560         hsw_wait_for_power_well_disable(dev_priv, power_well);
561 }
562
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static u64 async_put_domains_mask(struct i915_power_domains *power_domains);

/*
 * Number of references on @power_well that belong to domains with a pending
 * async put, i.e. references that may be released without the holder still
 * actively using the HW.
 */
static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
				      struct i915_power_well *power_well)
{
	int refs = hweight64(power_well->desc->domains &
			     async_put_domains_mask(&dev_priv->power_domains));

	drm_WARN_ON(&dev_priv->drm, refs > power_well->count);

	return refs;
}

/*
 * Debug-only assert that whoever toggles a TC AUX power well also holds a
 * TC port reference, except in the cases listed below where no such
 * reference is required.
 */
static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
	/* Bypass the check if all references are released asynchronously */
	if (power_well_async_ref_count(dev_priv, power_well) ==
	    power_well->count)
		return;

	if (drm_WARN_ON(&dev_priv->drm, !dig_port))
		return;

	/*
	 * ICL legacy TC port AUX is kept enabled to hold the port out of
	 * TC cold (see tc_phy_aux_timeout_expected()), so no port ref is
	 * expected here.
	 */
	if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port)
		return;

	drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
}

#else

/* Non-debug stub: the TC port reference assert compiles away. */
static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
}

#endif
605
606 #define TGL_AUX_PW_TO_TC_PORT(pw_idx)   ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
607
608 static void icl_tc_cold_exit(struct drm_i915_private *i915)
609 {
610         int ret, tries = 0;
611
612         while (1) {
613                 ret = sandybridge_pcode_write_timeout(i915,
614                                                       ICL_PCODE_EXIT_TCCOLD,
615                                                       0, 250, 1);
616                 if (ret != -EAGAIN || ++tries == 3)
617                         break;
618                 msleep(1);
619         }
620
621         /* Spec states that TC cold exit can take up to 1ms to complete */
622         if (!ret)
623                 msleep(1);
624
625         /* TODO: turn failure into a error as soon i915 CI updates ICL IFWI */
626         drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
627                     "succeeded");
628 }
629
/*
 * TC PHY AUX power well enable: select legacy vs. TBT AUX IO mode, run the
 * common HSW enable sequence (with an ICL TC-cold exit in between for
 * legacy ports) and, on TGL+, check the Dekel PHY microcontroller health.
 *
 * NOTE(review): dig_port is dereferenced below for the ICL legacy-port
 * check; this assumes an encoder always exists on this AUX channel when
 * running on gen11 — confirm against aux_ch_to_digital_port() callers.
 */
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
				 struct i915_power_well *power_well)
{
	enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
	struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
	u32 val;

	icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

	/* Route the AUX channel to TBT or legacy IO depending on the well. */
	val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
	val &= ~DP_AUX_CH_CTL_TBT_IO;
	if (power_well->desc->hsw.is_tc_tbt)
		val |= DP_AUX_CH_CTL_TBT_IO;
	intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);

	hsw_power_well_enable_prepare(dev_priv, power_well);

	if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port)
		icl_tc_cold_exit(dev_priv);

	hsw_power_well_enable_complete(dev_priv, power_well);

	if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) {
		enum tc_port tc_port;

		tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
			       HIP_INDEX_VAL(tc_port, 0x2));

		if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
					  DKL_CMN_UC_DW27_UC_HEALTH, 1))
			drm_warn(&dev_priv->drm,
				 "Timeout waiting TC uC health\n");
	}
}
666
667 static void
668 icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
669                                   struct i915_power_well *power_well)
670 {
671         enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
672         struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
673
674         icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
675
676         hsw_power_well_disable(dev_priv, power_well);
677 }
678
679 static void
680 icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
681                           struct i915_power_well *power_well)
682 {
683         int pw_idx = power_well->desc->hsw.idx;
684         enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);  /* non-TBT only */
685         bool is_tbt = power_well->desc->hsw.is_tc_tbt;
686
687         if (is_tbt || intel_phy_is_tc(dev_priv, phy))
688                 return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
689         else if (IS_ICELAKE(dev_priv))
690                 return icl_combo_phy_aux_power_well_enable(dev_priv,
691                                                            power_well);
692         else
693                 return hsw_power_well_enable(dev_priv, power_well);
694 }
695
696 static void
697 icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
698                            struct i915_power_well *power_well)
699 {
700         int pw_idx = power_well->desc->hsw.idx;
701         enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);  /* non-TBT only */
702         bool is_tbt = power_well->desc->hsw.is_tc_tbt;
703
704         if (is_tbt || intel_phy_is_tc(dev_priv, phy))
705                 return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
706         else if (IS_ICELAKE(dev_priv))
707                 return icl_combo_phy_aux_power_well_disable(dev_priv,
708                                                             power_well);
709         else
710                 return hsw_power_well_disable(dev_priv, power_well);
711 }
712
713 /*
714  * We should only use the power well if we explicitly asked the hardware to
715  * enable it, so check if it's enabled and also check if we've requested it to
716  * be enabled.
717  */
718 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
719                                    struct i915_power_well *power_well)
720 {
721         const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
722         enum i915_power_well_id id = power_well->desc->id;
723         int pw_idx = power_well->desc->hsw.idx;
724         u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
725                    HSW_PWR_WELL_CTL_STATE(pw_idx);
726         u32 val;
727
728         val = intel_de_read(dev_priv, regs->driver);
729
730         /*
731          * On GEN9 big core due to a DMC bug the driver's request bits for PW1
732          * and the MISC_IO PW will be not restored, so check instead for the
733          * BIOS's own request bits, which are forced-on for these power wells
734          * when exiting DC5/6.
735          */
736         if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
737             (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
738                 val |= intel_de_read(dev_priv, regs->bios);
739
740         return (val & mask) == mask;
741 }
742
/* Sanity-check the known preconditions for entering the DC9 power state. */
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
		      "DC9 already programmed to be enabled.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled to enable DC9.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
		      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
		      "Power well 2 on.\n");
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");

	 /*
	  * TODO: check for the following to verify the conditions to enter DC9
	  * state are satisfied:
	  * 1] Check relevant display engine registers to verify if mode set
	  * disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
767
/* Sanity-check the known preconditions for exiting the DC9 power state. */
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
		      "Interrupts not disabled yet.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, DC_STATE_EN) &
		      DC_STATE_EN_UPTO_DC5,
		      "DC5 still not disabled.\n");

	 /*
	  * TODO: check for the following to verify DC9 state was indeed
	  * entered before programming to disable it:
	  * 1] Check relevant display engine registers to verify if mode
	  *  set disable sequence was followed.
	  * 2] Check if display uninitialize sequence is initialized.
	  */
}
785
786 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
787                                 u32 state)
788 {
789         int rewrites = 0;
790         int rereads = 0;
791         u32 v;
792
793         intel_de_write(dev_priv, DC_STATE_EN, state);
794
795         /* It has been observed that disabling the dc6 state sometimes
796          * doesn't stick and dmc keeps returning old value. Make sure
797          * the write really sticks enough times and also force rewrite until
798          * we are confident that state is exactly what we want.
799          */
800         do  {
801                 v = intel_de_read(dev_priv, DC_STATE_EN);
802
803                 if (v != state) {
804                         intel_de_write(dev_priv, DC_STATE_EN, state);
805                         rewrites++;
806                         rereads = 0;
807                 } else if (rereads++ > 5) {
808                         break;
809                 }
810
811         } while (rewrites < 100);
812
813         if (v != state)
814                 drm_err(&dev_priv->drm,
815                         "Writing dc state to 0x%x failed, now 0x%x\n",
816                         state, v);
817
818         /* Most of the times we need one retry, avoid spam */
819         if (rewrites > 1)
820                 drm_dbg_kms(&dev_priv->drm,
821                             "Rewrote dc state to 0x%x %d times\n",
822                             state, rewrites);
823 }
824
825 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
826 {
827         u32 mask;
828
829         mask = DC_STATE_EN_UPTO_DC5;
830
831         if (INTEL_GEN(dev_priv) >= 12)
832                 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
833                                           | DC_STATE_EN_DC9;
834         else if (IS_GEN(dev_priv, 11))
835                 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
836         else if (IS_GEN9_LP(dev_priv))
837                 mask |= DC_STATE_EN_DC9;
838         else
839                 mask |= DC_STATE_EN_UPTO_DC6;
840
841         return mask;
842 }
843
844 static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
845 {
846         u32 val;
847
848         val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
849
850         drm_dbg_kms(&dev_priv->drm,
851                     "Resetting DC state tracking from %02x to %02x\n",
852                     dev_priv->csr.dc_state, val);
853         dev_priv->csr.dc_state = val;
854 }
855
856 /**
857  * gen9_set_dc_state - set target display C power state
858  * @dev_priv: i915 device instance
859  * @state: target DC power state
860  * - DC_STATE_DISABLE
861  * - DC_STATE_EN_UPTO_DC5
862  * - DC_STATE_EN_UPTO_DC6
863  * - DC_STATE_EN_DC9
864  *
865  * Signal to DMC firmware/HW the target DC power state passed in @state.
866  * DMC/HW can turn off individual display clocks and power rails when entering
867  * a deeper DC power state (higher in number) and turns these back when exiting
868  * that state to a shallower power state (lower in number). The HW will decide
869  * when to actually enter a given state on an on-demand basis, for instance
870  * depending on the active state of display pipes. The state of display
871  * registers backed by affected power rails are saved/restored as needed.
872  *
873  * Based on the above enabling a deeper DC power state is asynchronous wrt.
874  * enabling it. Disabling a deeper power state is synchronous: for instance
875  * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
876  * back on and register state is restored. This is guaranteed by the MMIO write
877  * to DC_STATE_EN blocking until the state is restored.
878  */
879 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
880 {
881         u32 val;
882         u32 mask;
883
884         if (drm_WARN_ON_ONCE(&dev_priv->drm,
885                              state & ~dev_priv->csr.allowed_dc_mask))
886                 state &= dev_priv->csr.allowed_dc_mask;
887
888         val = intel_de_read(dev_priv, DC_STATE_EN);
889         mask = gen9_dc_mask(dev_priv);
890         drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
891                     val & mask, state);
892
893         /* Check if DMC is ignoring our DC state requests */
894         if ((val & mask) != dev_priv->csr.dc_state)
895                 drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
896                         dev_priv->csr.dc_state, val & mask);
897
898         val &= ~mask;
899         val |= state;
900
901         gen9_write_dc_state(dev_priv, val);
902
903         dev_priv->csr.dc_state = val & mask;
904 }
905
906 static u32
907 sanitize_target_dc_state(struct drm_i915_private *dev_priv,
908                          u32 target_dc_state)
909 {
910         u32 states[] = {
911                 DC_STATE_EN_UPTO_DC6,
912                 DC_STATE_EN_UPTO_DC5,
913                 DC_STATE_EN_DC3CO,
914                 DC_STATE_DISABLE,
915         };
916         int i;
917
918         for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
919                 if (target_dc_state != states[i])
920                         continue;
921
922                 if (dev_priv->csr.allowed_dc_mask & target_dc_state)
923                         break;
924
925                 target_dc_state = states[i + 1];
926         }
927
928         return target_dc_state;
929 }
930
/* Request entry into the DC3CO display power state. */
static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}
936
937 static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
938 {
939         u32 val;
940
941         drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
942         val = intel_de_read(dev_priv, DC_STATE_EN);
943         val &= ~DC_STATE_DC3CO_STATUS;
944         intel_de_write(dev_priv, DC_STATE_EN, val);
945         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
946         /*
947          * Delay of 200us DC3CO Exit time B.Spec 49196
948          */
949         usleep_range(200, 210);
950 }
951
/* Request DC9 entry, resetting power sequencer state first where needed. */
static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
	/*
	 * Power sequencer reset is not needed on
	 * platforms with South Display Engine on PCH,
	 * because PPS registers are always on.
	 */
	if (!HAS_PCH_SPLIT(dev_priv))
		intel_power_sequencer_reset(dev_priv);
	gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
966
/* Exit DC9 and re-apply the PPS register unlock workaround. */
static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
	assert_can_disable_dc9(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	intel_pps_unlock_regs_wa(dev_priv);
}
977
/*
 * Warn if the CSR/DMC firmware doesn't appear to be loaded: the program
 * storage, SSP base and HTP registers are all expected to be non-zero.
 */
static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      !intel_de_read(dev_priv, CSR_PROGRAM(0)),
		      "CSR program storage start is NULL\n");
	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_SSP_BASE),
		      "CSR SSP Base Not fine\n");
	drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_HTP_SKL),
		      "CSR HTP Not fine\n");
}
988
989 static struct i915_power_well *
990 lookup_power_well(struct drm_i915_private *dev_priv,
991                   enum i915_power_well_id power_well_id)
992 {
993         struct i915_power_well *power_well;
994
995         for_each_power_well(dev_priv, power_well)
996                 if (power_well->desc->id == power_well_id)
997                         return power_well;
998
999         /*
1000          * It's not feasible to add error checking code to the callers since
1001          * this condition really shouldn't happen and it doesn't even make sense
1002          * to abort things like display initialization sequences. Just return
1003          * the first power well and hope the WARN gets reported so we can fix
1004          * our driver.
1005          */
1006         drm_WARN(&dev_priv->drm, 1,
1007                  "Power well %d not defined for this platform\n",
1008                  power_well_id);
1009         return &dev_priv->power_domains.power_wells[0];
1010 }
1011
1012 /**
1013  * intel_display_power_set_target_dc_state - Set target dc state.
1014  * @dev_priv: i915 device
1015  * @state: state which needs to be set as target_dc_state.
1016  *
1017  * This function set the "DC off" power well target_dc_state,
1018  * based upon this target_dc_stste, "DC off" power well will
1019  * enable desired DC state.
1020  */
1021 void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
1022                                              u32 state)
1023 {
1024         struct i915_power_well *power_well;
1025         bool dc_off_enabled;
1026         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1027
1028         mutex_lock(&power_domains->lock);
1029         power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
1030
1031         if (drm_WARN_ON(&dev_priv->drm, !power_well))
1032                 goto unlock;
1033
1034         state = sanitize_target_dc_state(dev_priv, state);
1035
1036         if (state == dev_priv->csr.target_dc_state)
1037                 goto unlock;
1038
1039         dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
1040                                                            power_well);
1041         /*
1042          * If DC off power well is disabled, need to enable and disable the
1043          * DC off power well to effect target DC state.
1044          */
1045         if (!dc_off_enabled)
1046                 power_well->desc->ops->enable(dev_priv, power_well);
1047
1048         dev_priv->csr.target_dc_state = state;
1049
1050         if (!dc_off_enabled)
1051                 power_well->desc->ops->disable(dev_priv, power_well);
1052
1053 unlock:
1054         mutex_unlock(&power_domains->lock);
1055 }
1056
1057 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
1058 {
1059         enum i915_power_well_id high_pg;
1060
1061         /* Power wells at this level and above must be disabled for DC5 entry */
1062         if (INTEL_GEN(dev_priv) >= 12)
1063                 high_pg = ICL_DISP_PW_3;
1064         else
1065                 high_pg = SKL_DISP_PW_2;
1066
1067         drm_WARN_ONCE(&dev_priv->drm,
1068                       intel_display_power_well_is_enabled(dev_priv, high_pg),
1069                       "Power wells above platform's DC5 limit still enabled.\n");
1070
1071         drm_WARN_ONCE(&dev_priv->drm,
1072                       (intel_de_read(dev_priv, DC_STATE_EN) &
1073                        DC_STATE_EN_UPTO_DC5),
1074                       "DC5 already programmed to be enabled.\n");
1075         assert_rpm_wakelock_held(&dev_priv->runtime_pm);
1076
1077         assert_csr_loaded(dev_priv);
1078 }
1079
1080 static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
1081 {
1082         assert_can_enable_dc5(dev_priv);
1083
1084         drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
1085
1086         /* Wa Display #1183: skl,kbl,cfl */
1087         if (IS_GEN9_BC(dev_priv))
1088                 intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
1089                                intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
1090
1091         gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
1092 }
1093
/*
 * Sanity check the preconditions for requesting DC6: backlight (utility
 * pin) disabled, no pending DC6 request and CSR/DMC firmware loaded.
 */
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		      "Backlight is not disabled.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC6),
		      "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}
1106
1107 static void skl_enable_dc6(struct drm_i915_private *dev_priv)
1108 {
1109         assert_can_enable_dc6(dev_priv);
1110
1111         drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
1112
1113         /* Wa Display #1183: skl,kbl,cfl */
1114         if (IS_GEN9_BC(dev_priv))
1115                 intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
1116                                intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
1117
1118         gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1119 }
1120
1121 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
1122                                    struct i915_power_well *power_well)
1123 {
1124         const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
1125         int pw_idx = power_well->desc->hsw.idx;
1126         u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
1127         u32 bios_req = intel_de_read(dev_priv, regs->bios);
1128
1129         /* Take over the request bit if set by BIOS. */
1130         if (bios_req & mask) {
1131                 u32 drv_req = intel_de_read(dev_priv, regs->driver);
1132
1133                 if (!(drv_req & mask))
1134                         intel_de_write(dev_priv, regs->driver, drv_req | mask);
1135                 intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
1136         }
1137 }
1138
/* Enable hook: initialize the DDI PHY behind this DPIO common power well. */
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}
1144
/* Disable hook: uninitialize the DDI PHY behind this DPIO common power well. */
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}
1150
/* is_enabled hook: report whether the DDI PHY behind this well is enabled. */
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}
1156
1157 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
1158 {
1159         struct i915_power_well *power_well;
1160
1161         power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
1162         if (power_well->count > 0)
1163                 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1164
1165         power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1166         if (power_well->count > 0)
1167                 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1168
1169         if (IS_GEMINILAKE(dev_priv)) {
1170                 power_well = lookup_power_well(dev_priv,
1171                                                GLK_DISP_PW_DPIO_CMN_C);
1172                 if (power_well->count > 0)
1173                         bxt_ddi_phy_verify_state(dev_priv,
1174                                                  power_well->desc->bxt.phy);
1175         }
1176 }
1177
1178 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
1179                                            struct i915_power_well *power_well)
1180 {
1181         return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
1182                 (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
1183 }
1184
1185 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
1186 {
1187         u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
1188         u8 enabled_dbuf_slices = dev_priv->enabled_dbuf_slices_mask;
1189
1190         drm_WARN(&dev_priv->drm,
1191                  hw_enabled_dbuf_slices != enabled_dbuf_slices,
1192                  "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
1193                  hw_enabled_dbuf_slices,
1194                  enabled_dbuf_slices);
1195 }
1196
/*
 * Bring the display out of any DC state and sanity-check the state that
 * was supposed to be preserved across the DC transition (CDCLK config,
 * DBuf slices, DDI PHY state).
 */
static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_config cdclk_config = {};

	/* DC3CO has its own dedicated exit sequence. */
	if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
		tgl_disable_dc3co(dev_priv);
		return;
	}

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* CDCLK config must have survived the DC transition unchanged. */
	dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	drm_WARN_ON(&dev_priv->drm,
		    intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
					      &cdclk_config));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);

	if (INTEL_GEN(dev_priv) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHY's HW context for port B is lost after DC transitions,
		 * so we need to restore it manually.
		 */
		intel_combo_phy_init(dev_priv);
}
1227
/* Enabling the "DC off" power well means disabling all DC states. */
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc_states(dev_priv);
}
1233
1234 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
1235                                            struct i915_power_well *power_well)
1236 {
1237         if (!dev_priv->csr.dmc_payload)
1238                 return;
1239
1240         switch (dev_priv->csr.target_dc_state) {
1241         case DC_STATE_EN_DC3CO:
1242                 tgl_enable_dc3co(dev_priv);
1243                 break;
1244         case DC_STATE_EN_UPTO_DC6:
1245                 skl_enable_dc6(dev_priv);
1246                 break;
1247         case DC_STATE_EN_UPTO_DC5:
1248                 gen9_enable_dc5(dev_priv);
1249                 break;
1250         }
1251 }
1252
/* Intentionally empty: wells using these ops need no HW state sync. */
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}
1257
/* Intentionally empty: an always-on well can't be enabled or disabled. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
1262
/* An always-on power well is, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					     struct i915_power_well *power_well)
{
	return true;
}
1268
1269 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
1270                                          struct i915_power_well *power_well)
1271 {
1272         if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
1273                 i830_enable_pipe(dev_priv, PIPE_A);
1274         if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
1275                 i830_enable_pipe(dev_priv, PIPE_B);
1276 }
1277
/* Turn off both pipes, in the reverse order of enabling (B then A). */
static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}
1284
1285 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1286                                           struct i915_power_well *power_well)
1287 {
1288         return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1289                 intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
1290 }
1291
/* Bring the pipes in line with the well's SW reference count. */
static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}
1300
/*
 * Set a VLV/CHV power well on or off through the Punit PWRGT registers.
 * Skips the control write if the status already matches, otherwise waits
 * (wait_for() timeout of 100) for the status to reflect the request.
 * Note: COND must stay a macro since wait_for() re-evaluates it.
 */
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = power_well->desc->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
			 PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		drm_err(&dev_priv->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	vlv_punit_put(dev_priv);
}
1337
/* Enable hook for plain VLV power wells. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}
1343
/* Disable hook for plain VLV power wells. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
1349
/*
 * Report whether a VLV power well is on, based on the Punit PWRGT status.
 * Also warns if the status is neither fully-on nor fully-gated, or if the
 * control register disagrees with the status (someone else poking at it).
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	drm_WARN_ON(&dev_priv->drm, ctrl != state);

	vlv_punit_put(dev_priv);

	return enabled;
}
1385
/*
 * Program VLV/CHV display clock gating, memory arbiter defaults and the
 * raw clock frequency after the display power well comes up.
 */
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = intel_de_read(dev_priv, DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	intel_de_write(dev_priv, DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	intel_de_write(dev_priv, MI_ARB_VLV,
		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	intel_de_write(dev_priv, CBR1_VLV, 0);

	/* Program the raw clock frequency in kHz (rawclk_freq is in Hz-ish
	 * units; divided by 1000 here — see RUNTIME_INFO for the source). */
	drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
	intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
		       DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
					 1000));
}
1413
/*
 * Common init sequence after the VLV/CHV display power well turns on:
 * enable CRI ref clocks on the DPLLs, program clock gating, re-enable
 * display IRQs and (outside of driver init) restore HPD/CRT/VGA/PPS state.
 */
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = intel_de_read(dev_priv, DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		intel_de_write(dev_priv, DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	intel_vga_redisable_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}
1462
/*
 * Teardown sequence before the VLV/CHV display power well goes down:
 * disable and drain display IRQs, reset power sequencer state and switch
 * HPD to polling (unless we're in late suspend).
 */
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling on accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}
1478
/* Turn on the display power well, then run the post-enable init sequence. */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1486
/* Run the pre-disable teardown sequence, then turn off the power well. */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}
1494
/*
 * Enable the DPIO common lane power well and then de-assert the DPIO
 * common reset. The ordering and the delay below follow the VLV DPIO
 * programming notes quoted in the comment.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
	 *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *   b. The other bits such as sfr settings / modesel may all
	 *      be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	intel_de_write(dev_priv, DPIO_CTL,
		       intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
}
1517
/*
 * Assert the DPIO common reset (all PLLs must already be disabled) and
 * then turn off the DPIO common lane power well.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	intel_de_write(dev_priv, DPIO_CTL,
		       intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
1532
1533 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1534
1535 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1536
/*
 * Cross-check the CHV DPIO PHY power state reported by DISPLAY_PHY_STATUS
 * against what we expect given the cached chv_phy_control value and the
 * current state of the two common lane power wells (BC and D). Debug
 * assert only; logs an error on mismatch.
 */
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		/* Spline LDOs: lanes 0-1 (mask 0x3) feed spline 0, lanes 2-3 (0xc) spline 1 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	/* Ignore the bits whose asserts are still disabled (see above). */
	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
				       phy_status_mask, phy_status, 10))
		drm_err(&dev_priv->drm,
			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
			phy_status, dev_priv->chv_phy_control);
}
1640
1641 #undef BITS_SET
1642
/*
 * Power up a CHV DPIO common lane well (BC or D): enable the well via the
 * Punit, wait for PHYPWRGOOD, configure dynamic power down in the PHY,
 * then deassert the common lane reset through DISPLAY_PHY_CONTROL.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	u32 tmp;

	drm_WARN_ON_ONCE(&dev_priv->drm,
			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	/* pipe is only used here to select the DPIO register block */
	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
				  PHY_POWERGOOD(phy), 1))
		drm_err(&dev_priv->drm, "Display PHY %d is not power up\n",
			phy);

	vlv_dpio_get(dev_priv);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		/* PHY0 has a second channel; enable its dynamic power down too */
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	vlv_dpio_put(dev_priv);

	/* Deassert common lane reset and sync the cached control value to HW */
	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	drm_dbg_kms(&dev_priv->drm,
		    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}
1707
/*
 * Power down a CHV DPIO common lane well: assert the common lane reset
 * in DISPLAY_PHY_CONTROL and then gate the well via the Punit. The PLLs
 * driven by the PHY must already be disabled (asserted below).
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	drm_WARN_ON_ONCE(&dev_priv->drm,
			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}
1741
/*
 * Verify the per-lane power down status bits in the PHY's common lane
 * register agree with the requested override/mask. @mask is the set of
 * lanes being kept powered (0xf = all four). Debug assert only.
 */
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	vlv_dpio_get(dev_priv);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	vlv_dpio_put(dev_priv);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		/* some lanes enabled -> "any" down, but not "all" down */
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	/* status bits live at different offsets for the two channels */
	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	drm_WARN(&dev_priv->drm, actual != expected,
		 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
		 !!(actual & DPIO_ALLDL_POWERDOWN),
		 !!(actual & DPIO_ANYDL_POWERDOWN),
		 !!(expected & DPIO_ALLDL_POWERDOWN),
		 !!(expected & DPIO_ANYDL_POWERDOWN),
		 reg, val);
}
1805
/*
 * Set or clear the power down override enable for a whole PHY channel
 * and sync the result to DISPLAY_PHY_CONTROL. Returns the previous
 * override state so the caller can restore it later.
 */
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	/* nothing to do if the override state is already as requested */
	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	drm_dbg_kms(&dev_priv->drm,
		    "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		    phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}
1838
/*
 * Update the per-lane power down override for the encoder's PHY channel:
 * @mask selects which lanes stay powered and @override enables/disables
 * the override itself. The new state is written to DISPLAY_PHY_CONTROL
 * and cross-checked with the debug asserts.
 */
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(encoder));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));

	mutex_lock(&power_domains->lock);

	/* replace the old lane mask for this channel with the new one */
	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	drm_dbg_kms(&dev_priv->drm,
		    "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		    phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
1870
/*
 * Query the CHV pipe power well state from the Punit DSPSSPM register.
 * Only pipe A is handled here (the pipe is hardcoded). Returns true if
 * the well reports the power-on state.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
		    state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
	drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);

	vlv_punit_put(dev_priv);

	return enabled;
}
1900
/*
 * Request the CHV pipe A power well on or off through the Punit DSPSSPM
 * control field and wait (up to 100ms) for the status field to reflect
 * the requested state.
 */
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	vlv_punit_get(dev_priv);

/* true once the Punit status field matches the requested state */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);

	if (wait_for(COND, 100))
		drm_err(&dev_priv->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));

#undef COND

out:
	vlv_punit_put(dev_priv);
}
1935
1936 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
1937                                         struct i915_power_well *power_well)
1938 {
1939         intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1940                        dev_priv->chv_phy_control);
1941 }
1942
/* Enable the CHV pipe power well, then bring up the common display state. */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
1950
/* Tear down the common display state, then gate the CHV pipe power well. */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}
1958
1959 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
1960 {
1961         return power_domains->async_put_domains[0] |
1962                power_domains->async_put_domains[1];
1963 }
1964
1965 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1966
/*
 * A domain must never be pending in both async_put_domains slots at once.
 * Returns true if the masks are disjoint; fires a WARN otherwise.
 */
static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
	return !WARN_ON(power_domains->async_put_domains[0] &
			power_domains->async_put_domains[1]);
}
1973
/*
 * Sanity check the async-put bookkeeping: the two pending masks must be
 * disjoint, a wakeref must be held iff any domain is pending, and every
 * pending domain must hold exactly one reference. Returns true if all
 * invariants hold.
 */
static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
	enum intel_display_power_domain domain;
	bool err = false;

	err |= !assert_async_put_domain_masks_disjoint(power_domains);
	err |= WARN_ON(!!power_domains->async_put_wakeref !=
		       !!__async_put_domains_mask(power_domains));

	for_each_power_domain(domain, __async_put_domains_mask(power_domains))
		err |= WARN_ON(power_domains->domain_use_count[domain] != 1);

	return !err;
}
1989
/* Debug dump of the domains in @mask with their current use counts. */
static void print_power_domains(struct i915_power_domains *power_domains,
				const char *prefix, u64 mask)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);
	enum intel_display_power_domain domain;

	drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask));
	for_each_power_domain(domain, mask)
		drm_dbg(&i915->drm, "%s use_count %d\n",
			intel_display_power_domain_str(domain),
			power_domains->domain_use_count[domain]);
}
2004
/* Debug dump of the async-put wakeref and both pending domain masks. */
static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);

	drm_dbg(&i915->drm, "async_put_wakeref %u\n",
		power_domains->async_put_wakeref);

	print_power_domains(power_domains, "async_put_domains[0]",
			    power_domains->async_put_domains[0]);
	print_power_domains(power_domains, "async_put_domains[1]",
			    power_domains->async_put_domains[1]);
}
2020
/* Dump the async-put state if its invariants are violated (debug builds). */
static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
	if (!__async_put_domains_state_ok(power_domains))
		print_async_put_domains_state(power_domains);
}
2027
2028 #else
2029
/* No-op stub when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is disabled. */
static void
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
}
2034
/* No-op stub when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is disabled. */
static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
}
2039
2040 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
2041
/*
 * Union of both pending async-put domain masks, asserting first that a
 * domain is never pending in both slots at once.
 */
static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	return __async_put_domains_mask(power_domains);
}
2048
2049 static void
2050 async_put_domains_clear_domain(struct i915_power_domains *power_domains,
2051                                enum intel_display_power_domain domain)
2052 {
2053         assert_async_put_domain_masks_disjoint(power_domains);
2054
2055         power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
2056         power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
2057 }
2058
/*
 * Take over the domain reference a pending async put still holds for
 * @domain, if any. Returns true if such a reference was grabbed. If this
 * was the last pending domain, also cancels the delayed worker and drops
 * its raw wakeref. Called with power_domains->lock held (see callers).
 */
static bool
intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool ret = false;

	if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
		goto out_verify;

	async_put_domains_clear_domain(power_domains, domain);

	ret = true;

	/* other domains still pending: leave the worker and wakeref alone */
	if (async_put_domains_mask(power_domains))
		goto out_verify;

	cancel_delayed_work(&power_domains->async_put_work);
	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
				 fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
	verify_async_put_domains_state(power_domains);

	return ret;
}
2084
/*
 * Grab a reference for @domain, either by stealing a pending async-put
 * reference or by enabling all power wells providing the domain.
 * Called with power_domains->lock held (see callers).
 */
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	/* a pending async put already holds the refs we need */
	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
		return;

	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}
2100
/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 *
 * Returns: the wakeref tracking the acquired runtime pm reference; it must
 * be passed back to the matching put call.
 */
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	mutex_lock(&power_domains->lock);
	__intel_display_power_get_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return wakeref;
}
2125
2126 /**
2127  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
2128  * @dev_priv: i915 device instance
2129  * @domain: power domain to reference
2130  *
2131  * This function grabs a power domain reference for @domain and ensures that the
2132  * power domain and all its parents are powered up. Therefore users should only
2133  * grab a reference to the innermost power domain they need.
2134  *
2135  * Any power domain reference obtained by this function must have a symmetric
2136  * call to intel_display_power_put() to release the reference again.
2137  */
2138 intel_wakeref_t
2139 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
2140                                    enum intel_display_power_domain domain)
2141 {
2142         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2143         intel_wakeref_t wakeref;
2144         bool is_enabled;
2145
2146         wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
2147         if (!wakeref)
2148                 return false;
2149
2150         mutex_lock(&power_domains->lock);
2151
2152         if (__intel_display_power_is_enabled(dev_priv, domain)) {
2153                 __intel_display_power_get_domain(dev_priv, domain);
2154                 is_enabled = true;
2155         } else {
2156                 is_enabled = false;
2157         }
2158
2159         mutex_unlock(&power_domains->lock);
2160
2161         if (!is_enabled) {
2162                 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2163                 wakeref = 0;
2164         }
2165
2166         return wakeref;
2167 }
2168
/*
 * Drop one reference for @domain, releasing the power wells in reverse
 * order. Warns if the use count is already zero or if an async disable
 * of the domain is still pending. Called with power_domains->lock held
 * (see callers).
 */
static void
__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	const char *name = intel_display_power_domain_str(domain);

	power_domains = &dev_priv->power_domains;

	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
		 "Use count on domain %s is already zero\n",
		 name);
	drm_WARN(&dev_priv->drm,
		 async_put_domains_mask(power_domains) & BIT_ULL(domain),
		 "Async disabling of domain %s is pending\n",
		 name);

	power_domains->domain_use_count[domain]--;

	/* release wells in the opposite order to how they were acquired */
	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_put(dev_priv, power_well);
}
2192
/* Locked wrapper around __intel_display_power_put_domain(). */
static void __intel_display_power_put(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	__intel_display_power_put_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);
}
2202
/**
 * intel_display_power_put_unchecked - release an unchecked power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * This function exists only for historical reasons and should be avoided in
 * new code, as the correctness of its use cannot be checked. Always use
 * intel_display_power_put() instead.
 */
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	__intel_display_power_put(dev_priv, domain);
	/* unchecked: the wakeref obtained at get time is not verified here */
	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
2222
/*
 * Hand @wakeref (a raw runtime pm ref) over to the delayed async-put
 * worker and schedule it to run after 100ms. Warns if a wakeref is
 * already owned by the worker or if the work was already queued.
 */
static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
			     intel_wakeref_t wakeref)
{
	WARN_ON(power_domains->async_put_wakeref);
	power_domains->async_put_wakeref = wakeref;
	WARN_ON(!queue_delayed_work(system_unbound_wq,
				    &power_domains->async_put_work,
				    msecs_to_jiffies(100)));
}
2233
/*
 * Drop the domain references pending in @mask. The caller must hold a
 * raw wakeref; a full wakeref is taken around the puts so the HW access
 * during power well disabling satisfies the runtime pm state checker.
 */
static void
release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
{
	struct drm_i915_private *dev_priv =
		container_of(power_domains, struct drm_i915_private,
			     power_domains);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	/*
	 * The caller must hold already raw wakeref, upgrade that to a proper
	 * wakeref to make the state checker happy about the HW access during
	 * power well disabling.
	 */
	assert_rpm_raw_wakeref_held(rpm);
	wakeref = intel_runtime_pm_get(rpm);

	for_each_power_domain(domain, mask) {
		/* Clear before put, so put's sanity check is happy. */
		async_put_domains_clear_domain(power_domains, domain);
		__intel_display_power_put_domain(dev_priv, domain);
	}

	intel_runtime_pm_put(rpm, wakeref);
}
2260
/*
 * Delayed worker releasing the domain references queued by
 * __intel_display_power_put_async(). Releases slot [0], then requeues
 * itself (passing along its own raw wakeref) if more domains were
 * async-put into slot [1] in the meantime.
 */
static void
intel_display_power_put_async_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     power_domains.async_put_work.work);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
	intel_wakeref_t old_work_wakeref = 0;

	mutex_lock(&power_domains->lock);

	/*
	 * Bail out if all the domain refs pending to be released were grabbed
	 * by subsequent gets or a flush_work.
	 */
	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!old_work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  power_domains->async_put_domains[0]);

	/* Requeue the work if more domains were async put meanwhile. */
	if (power_domains->async_put_domains[1]) {
		power_domains->async_put_domains[0] =
			fetch_and_zero(&power_domains->async_put_domains[1]);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&new_work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	/* drop whichever wakerefs were not handed over to a requeued work */
	if (old_work_wakeref)
		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
	if (new_work_wakeref)
		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
}
2303
/**
 * __intel_display_power_put_async - release a power domain reference asynchronously
 * @i915: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get*() and schedules a work to power down the
 * corresponding hardware block if this is the last reference.
 */
void __intel_display_power_put_async(struct drm_i915_private *i915,
				     enum intel_display_power_domain domain,
				     intel_wakeref_t wakeref)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);

	mutex_lock(&power_domains->lock);

	/* not the last reference: drop it synchronously, nothing to defer */
	if (power_domains->domain_use_count[domain] > 1) {
		__intel_display_power_put_domain(i915, domain);

		goto out_verify;
	}

	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);

	/* Let a pending work requeue itself or queue a new one. */
	if (power_domains->async_put_wakeref) {
		power_domains->async_put_domains[1] |= BIT_ULL(domain);
	} else {
		power_domains->async_put_domains[0] |= BIT_ULL(domain);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	/* raw ref not handed to the worker: drop it here */
	if (work_wakeref)
		intel_runtime_pm_put_raw(rpm, work_wakeref);

	/* the caller's own wakeref is always released immediately */
	intel_runtime_pm_put(rpm, wakeref);
}
2351
2352 /**
2353  * intel_display_power_flush_work - flushes the async display power disabling work
2354  * @i915: i915 device instance
2355  *
2356  * Flushes any pending work that was scheduled by a preceding
2357  * intel_display_power_put_async() call, completing the disabling of the
2358  * corresponding power domains.
2359  *
2360  * Note that the work handler function may still be running after this
2361  * function returns; to ensure that the work handler isn't running use
2362  * intel_display_power_flush_work_sync() instead.
2363  */
2364 void intel_display_power_flush_work(struct drm_i915_private *i915)
2365 {
2366         struct i915_power_domains *power_domains = &i915->power_domains;
2367         intel_wakeref_t work_wakeref;
2368
2369         mutex_lock(&power_domains->lock);
2370
2371         work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2372         if (!work_wakeref)
2373                 goto out_verify;
2374
2375         release_async_put_domains(power_domains,
2376                                   async_put_domains_mask(power_domains));
2377         cancel_delayed_work(&power_domains->async_put_work);
2378
2379 out_verify:
2380         verify_async_put_domains_state(power_domains);
2381
2382         mutex_unlock(&power_domains->lock);
2383
2384         if (work_wakeref)
2385                 intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
2386 }
2387
2388 /**
2389  * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
2390  * @i915: i915 device instance
2391  *
2392  * Like intel_display_power_flush_work(), but also ensure that the work
2393  * handler function is not running any more when this function returns.
2394  */
2395 static void
2396 intel_display_power_flush_work_sync(struct drm_i915_private *i915)
2397 {
2398         struct i915_power_domains *power_domains = &i915->power_domains;
2399
2400         intel_display_power_flush_work(i915);
2401         cancel_delayed_work_sync(&power_domains->async_put_work);
2402
2403         verify_async_put_domains_state(power_domains);
2404
2405         drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
2406 }
2407
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * This wakeref-tracking variant is only built when
 * CONFIG_DRM_I915_DEBUG_RUNTIME_PM is enabled.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain,
			     intel_wakeref_t wakeref)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
#endif
2427
/*
 * Per-power-well domain masks: each *_POWER_DOMAINS macro below is the u64
 * bitmask of display power domains served by the correspondingly named power
 * well (used as the .domains field of a power well descriptor).
 * POWER_DOMAIN_INIT keeps the well enabled across display HW init/readout.
 */

/* i830: a single well covers both pipes, panel fitters and transcoders. */
#define I830_PIPES_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

/* VLV: whole-display well plus DPIO PHY common and per-lane TX wells. */
#define VLV_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

/* VLV TX lane wells: lanes 01/23 per PHY channel (B and C). */
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

/* CHV: adds pipe C and port D (with its own DPIO common well) over VLV. */
#define CHV_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
2518
/* HSW: everything except pipe A / transcoder EDP lives in the display well. */
#define HSW_DISPLAY_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* BDW: as HSW, but pipe A's panel fitter moved out of this well. */
#define BDW_DISPLAY_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* SKL: power well 2 plus per-DDI IO wells; DC_OFF gates DC state entry. */
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
2589
/* BXT: power well 2, DC_OFF and the two DPIO PHY common lane wells. */
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* GLK: adds per-port DDI IO, per-PHY common and per-port AUX wells. */
#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define GLK_DPIO_CMN_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
2673
/* CNL: as GLK but with port D/F and their AUX/DDI IO wells. */
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_VGA) |				\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* AUX A additionally needs the AUX IO domain (also used for eDP PSR). */
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
2730
2731 /*
2732  * ICL PW_0/PG_0 domains (HW/DMC control):
2733  * - PCI
2734  * - clocks except port PLL
2735  * - central power except FBC
2736  * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2737  * ICL PW_1/PG_1 domains (HW/DMC control):
2738  * - DBUF function
2739  * - PIPE_A and its planes, except VGA
2740  * - transcoder EDP + PSR
2741  * - transcoder DSI
2742  * - DDI_A
2743  * - FBC
2744  */
2745 #define ICL_PW_4_POWER_DOMAINS (                        \
2746         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2747         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |     \
2748         BIT_ULL(POWER_DOMAIN_INIT))
2749         /* VDSC/joining */
2750 #define ICL_PW_3_POWER_DOMAINS (                        \
2751         ICL_PW_4_POWER_DOMAINS |                        \
2752         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2753         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2754         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2755         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2756         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
2757         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
2758         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2759         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
2760         BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |        \
2761         BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |        \
2762         BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
2763         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2764         BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
2765         BIT_ULL(POWER_DOMAIN_AUX_E) |                   \
2766         BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
2767         BIT_ULL(POWER_DOMAIN_AUX_C_TBT) |               \
2768         BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |               \
2769         BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |               \
2770         BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |               \
2771         BIT_ULL(POWER_DOMAIN_VGA) |                     \
2772         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2773         BIT_ULL(POWER_DOMAIN_INIT))
2774         /*
2775          * - transcoder WD
2776          * - KVMR (HW control)
2777          */
2778 #define ICL_PW_2_POWER_DOMAINS (                        \
2779         ICL_PW_3_POWER_DOMAINS |                        \
2780         BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |             \
2781         BIT_ULL(POWER_DOMAIN_INIT))
2782         /*
2783          * - KVMR (HW control)
2784          */
2785 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (              \
2786         ICL_PW_2_POWER_DOMAINS |                        \
2787         BIT_ULL(POWER_DOMAIN_MODESET) |                 \
2788         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2789         BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) |                     \
2790         BIT_ULL(POWER_DOMAIN_INIT))
2791
2792 #define ICL_DDI_IO_A_POWER_DOMAINS (                    \
2793         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2794 #define ICL_DDI_IO_B_POWER_DOMAINS (                    \
2795         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2796 #define ICL_DDI_IO_C_POWER_DOMAINS (                    \
2797         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2798 #define ICL_DDI_IO_D_POWER_DOMAINS (                    \
2799         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2800 #define ICL_DDI_IO_E_POWER_DOMAINS (                    \
2801         BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2802 #define ICL_DDI_IO_F_POWER_DOMAINS (                    \
2803         BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2804
2805 #define ICL_AUX_A_IO_POWER_DOMAINS (                    \
2806         BIT_ULL(POWER_DOMAIN_AUX_IO_A) |                \
2807         BIT_ULL(POWER_DOMAIN_AUX_A))
2808 #define ICL_AUX_B_IO_POWER_DOMAINS (                    \
2809         BIT_ULL(POWER_DOMAIN_AUX_B))
2810 #define ICL_AUX_C_TC1_IO_POWER_DOMAINS (                \
2811         BIT_ULL(POWER_DOMAIN_AUX_C))
2812 #define ICL_AUX_D_TC2_IO_POWER_DOMAINS (                \
2813         BIT_ULL(POWER_DOMAIN_AUX_D))
2814 #define ICL_AUX_E_TC3_IO_POWER_DOMAINS (                \
2815         BIT_ULL(POWER_DOMAIN_AUX_E))
2816 #define ICL_AUX_F_TC4_IO_POWER_DOMAINS (                \
2817         BIT_ULL(POWER_DOMAIN_AUX_F))
2818 #define ICL_AUX_C_TBT1_IO_POWER_DOMAINS (               \
2819         BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
2820 #define ICL_AUX_D_TBT2_IO_POWER_DOMAINS (               \
2821         BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2822 #define ICL_AUX_E_TBT3_IO_POWER_DOMAINS (               \
2823         BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2824 #define ICL_AUX_F_TBT4_IO_POWER_DOMAINS (               \
2825         BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2826
/* TGL power wells nest: PW_5 ⊂ PW_4 ⊂ PW_3 ⊂ PW_2. */
#define TGL_PW_5_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_D) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_4_POWER_DOMAINS (			\
	TGL_PW_5_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_3_POWER_DOMAINS (			\
	TGL_PW_4_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_AUX_G) |			\
	BIT_ULL(POWER_DOMAIN_AUX_H) |			\
	BIT_ULL(POWER_DOMAIN_AUX_I) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |		\
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |		\
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |		\
	BIT_ULL(POWER_DOMAIN_AUX_G_TBT) |		\
	BIT_ULL(POWER_DOMAIN_AUX_H_TBT) |		\
	BIT_ULL(POWER_DOMAIN_AUX_I_TBT) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_2_POWER_DOMAINS (			\
	TGL_PW_3_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

/* NOTE: based on PW_3 (not PW_2); AUX A-C are on the always-on combo PHYs. */
#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	TGL_PW_3_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* Per-port DDI IO wells for the six Type-C ports (DDI D-I == TC1-6). */
#define TGL_DDI_IO_D_TC1_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
#define TGL_DDI_IO_E_TC2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
#define TGL_DDI_IO_F_TC3_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
#define TGL_DDI_IO_G_TC4_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
#define TGL_DDI_IO_H_TC5_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
#define TGL_DDI_IO_I_TC6_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))

/* Per-port AUX wells: combo (A-C), Type-C (TC1-6) and Thunderbolt variants. */
#define TGL_AUX_A_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A))
#define TGL_AUX_B_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B))
#define TGL_AUX_C_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C))
#define TGL_AUX_D_TC1_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D))
#define TGL_AUX_E_TC2_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_E))
#define TGL_AUX_F_TC3_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F))
#define TGL_AUX_G_TC4_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_G))
#define TGL_AUX_H_TC5_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_H))
#define TGL_AUX_I_TC6_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_I))
#define TGL_AUX_D_TBT1_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
#define TGL_AUX_E_TBT2_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
#define TGL_AUX_F_TBT3_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
#define TGL_AUX_G_TBT4_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
#define TGL_AUX_H_TBT5_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_I_TBT))

/* Domains blocked while the Type-C subsystem is in TC-cold. */
#define TGL_TC_COLD_OFF_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_D)	|	\
	BIT_ULL(POWER_DOMAIN_AUX_E)	|	\
	BIT_ULL(POWER_DOMAIN_AUX_F)	|	\
	BIT_ULL(POWER_DOMAIN_AUX_G)	|	\
	BIT_ULL(POWER_DOMAIN_AUX_H)	|	\
	BIT_ULL(POWER_DOMAIN_AUX_I)	|	\
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_G_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_H_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_I_TBT) |	\
	BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
2939
/*
 * Ops for power wells that are always on: enable/disable/sync_hw are
 * no-ops and the enabled check unconditionally reports "on".
 */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};
2946
/* Ops for the CHV pipe (disp2d) power well. */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};
2953
/*
 * Ops for the CHV DPIO common lane power wells; the enabled state is
 * read back via the shared VLV punit-based check.
 */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
2960
/*
 * Platforms without controllable display power wells: a single
 * always-on well covering every power domain.
 */
static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};
2970
/* Ops for the i830 "pipes" power well (both pipes handled as one well). */
static const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};
2977
/* i830 power wells: the always-on dummy plus a combined pipes well. */
static const struct i915_power_well_desc i830_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "pipes",
		.domains = I830_PIPES_POWER_DOMAINS,
		.ops = &i830_pipes_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};
2993
/*
 * Ops for HSW-style request/state power wells; reused by all later
 * platforms whose wells are driven through PWR_WELL_CTL registers.
 */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
3000
/* Ops for the virtual "DC off" well controlling DC state entry/exit. */
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};
3007
/* Ops for the BXT/GLK DPIO common (PHY) power wells. */
static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};
3014
/*
 * HSW+ power well control registers, one per request source
 * (BIOS, driver, KVMr, debug).
 */
static const struct i915_power_well_regs hsw_power_well_regs = {
	.bios	= HSW_PWR_WELL_CTL1,
	.driver	= HSW_PWR_WELL_CTL2,
	.kvmr	= HSW_PWR_WELL_CTL3,
	.debug	= HSW_PWR_WELL_CTL4,
};
3021
/* HSW power wells: the always-on dummy plus the single global well. */
static const struct i915_power_well_desc hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.has_vga = true,
		},
	},
};
3042
/*
 * BDW power wells: same layout as HSW, but the global well also gates
 * the pipe B/C interrupt handling (irq_pipe_mask).
 */
static const struct i915_power_well_desc bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
		},
	},
};
3064
/* Ops for the VLV display (disp2d) power well. */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
3071
/* Ops for the VLV DPIO common lane power well. */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
3078
/* Generic punit-controlled ops used for the VLV DPIO TX lane wells. */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
3085
/*
 * VLV power wells. Note that all four DPIO TX lane wells list the
 * identical combined B+C lane domain mask, so any lane use powers up
 * all TX lane groups together.
 */
static const struct i915_power_well_desc vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.ops = &vlv_display_power_well_ops,
		.id = VLV_DISP_PW_DISP2D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
		},
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
		},
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
		},
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
		},
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
		},
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &vlv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
};
3161
/* CHV power wells: display (pipe A) well plus two DPIO common wells. */
static const struct i915_power_well_desc chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.ops = &chv_pipe_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = CHV_DISP_PW_DPIO_CMN_D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
		},
	},
};
3200
3201 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
3202                                          enum i915_power_well_id power_well_id)
3203 {
3204         struct i915_power_well *power_well;
3205         bool ret;
3206
3207         power_well = lookup_power_well(dev_priv, power_well_id);
3208         ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
3209
3210         return ret;
3211 }
3212
/*
 * SKL power wells: PW1 and MISC IO are marked always-on here since
 * they are handled by the DMC firmware; PW2, the DC-off well and the
 * DDI IO wells are driver controlled via the HSW-style ops.
 */
static const struct i915_power_well_desc skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_MISC_IO,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
		},
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A/E IO power well",
		.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO power well",
		.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
		},
	},
};
3306
/*
 * BXT power wells: PW1 is DMC-handled (always-on from the driver's
 * view); PHY power is split into two DPIO common wells.
 */
static const struct i915_power_well_desc bxt_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-bc",
		.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
};
3366
/*
 * GLK power wells: like BXT but with three DPIO common (PHY) wells and
 * separate driver-controlled AUX A/B/C and DDI A/B/C IO wells.
 */
static const struct i915_power_well_desc glk_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-b",
		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
	{
		.name = "dpio-common-c",
		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = GLK_DISP_PW_DPIO_CMN_C,
		{
			.bxt.phy = DPIO_PHY2,
		},
	},
	{
		.name = "AUX A",
		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
};
3495
/*
 * CNL power wells: PW1 is DMC-handled; AUX A-D/F, PW2, the DC-off well
 * and DDI A-D/F IO wells are driver controlled via the HSW-style ops.
 */
static const struct i915_power_well_desc cnl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "AUX A",
		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "AUX D",
		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_AUX_D,
		},
	},
	{
		.name = "DC off",
		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO power well",
		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
		},
	},
	{
		.name = "DDI F IO power well",
		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_DDI_F,
		},
	},
	{
		.name = "AUX F",
		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_AUX_F,
		},
	},
};
3637
/*
 * Single set of AUX well ops for gen11+: enable/disable dispatch via
 * the icl_aux_* helpers, while sync_hw and the enabled check reuse the
 * HSW-style request/state handling.
 */
static const struct i915_power_well_ops icl_aux_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_aux_power_well_enable,
	.disable = icl_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
3644
/*
 * ICL AUX power well control registers. Note there is no .kvmr entry
 * for the AUX wells, unlike hsw_power_well_regs.
 */
static const struct i915_power_well_regs icl_aux_power_well_regs = {
	.bios	= ICL_PWR_WELL_CTL_AUX1,
	.driver	= ICL_PWR_WELL_CTL_AUX2,
	.debug	= ICL_PWR_WELL_CTL_AUX4,
};
3650
/* ICL DDI IO power well control registers (no .kvmr register here either). */
static const struct i915_power_well_regs icl_ddi_power_well_regs = {
	.bios	= ICL_PWR_WELL_CTL_DDI1,
	.driver	= ICL_PWR_WELL_CTL_DDI2,
	.debug	= ICL_PWR_WELL_CTL_DDI4,
};
3656
3657 static const struct i915_power_well_desc icl_power_wells[] = {
3658         {
3659                 .name = "always-on",
3660                 .always_on = true,
3661                 .domains = POWER_DOMAIN_MASK,
3662                 .ops = &i9xx_always_on_power_well_ops,
3663                 .id = DISP_PW_ID_NONE,
3664         },
3665         {
3666                 .name = "power well 1",
3667                 /* Handled by the DMC firmware */
3668                 .always_on = true,
3669                 .domains = 0,
3670                 .ops = &hsw_power_well_ops,
3671                 .id = SKL_DISP_PW_1,
3672                 {
3673                         .hsw.regs = &hsw_power_well_regs,
3674                         .hsw.idx = ICL_PW_CTL_IDX_PW_1,
3675                         .hsw.has_fuses = true,
3676                 },
3677         },
3678         {
3679                 .name = "DC off",
3680                 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3681                 .ops = &gen9_dc_off_power_well_ops,
3682                 .id = SKL_DISP_DC_OFF,
3683         },
3684         {
3685                 .name = "power well 2",
3686                 .domains = ICL_PW_2_POWER_DOMAINS,
3687                 .ops = &hsw_power_well_ops,
3688                 .id = SKL_DISP_PW_2,
3689                 {
3690                         .hsw.regs = &hsw_power_well_regs,
3691                         .hsw.idx = ICL_PW_CTL_IDX_PW_2,
3692                         .hsw.has_fuses = true,
3693                 },
3694         },
3695         {
3696                 .name = "power well 3",
3697                 .domains = ICL_PW_3_POWER_DOMAINS,
3698                 .ops = &hsw_power_well_ops,
3699                 .id = ICL_DISP_PW_3,
3700                 {
3701                         .hsw.regs = &hsw_power_well_regs,
3702                         .hsw.idx = ICL_PW_CTL_IDX_PW_3,
3703                         .hsw.irq_pipe_mask = BIT(PIPE_B),
3704                         .hsw.has_vga = true,
3705                         .hsw.has_fuses = true,
3706                 },
3707         },
3708         {
3709                 .name = "DDI A IO",
3710                 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
3711                 .ops = &hsw_power_well_ops,
3712                 .id = DISP_PW_ID_NONE,
3713                 {
3714                         .hsw.regs = &icl_ddi_power_well_regs,
3715                         .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3716                 },
3717         },
3718         {
3719                 .name = "DDI B IO",
3720                 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
3721                 .ops = &hsw_power_well_ops,
3722                 .id = DISP_PW_ID_NONE,
3723                 {
3724                         .hsw.regs = &icl_ddi_power_well_regs,
3725                         .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3726                 },
3727         },
3728         {
3729                 .name = "DDI C IO",
3730                 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
3731                 .ops = &hsw_power_well_ops,
3732                 .id = DISP_PW_ID_NONE,
3733                 {
3734                         .hsw.regs = &icl_ddi_power_well_regs,
3735                         .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3736                 },
3737         },
3738         {
3739                 .name = "DDI D IO",
3740                 .domains = ICL_DDI_IO_D_POWER_DOMAINS,
3741                 .ops = &hsw_power_well_ops,
3742                 .id = DISP_PW_ID_NONE,
3743                 {
3744                         .hsw.regs = &icl_ddi_power_well_regs,
3745                         .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3746                 },
3747         },
3748         {
3749                 .name = "DDI E IO",
3750                 .domains = ICL_DDI_IO_E_POWER_DOMAINS,
3751                 .ops = &hsw_power_well_ops,
3752                 .id = DISP_PW_ID_NONE,
3753                 {
3754                         .hsw.regs = &icl_ddi_power_well_regs,
3755                         .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3756                 },
3757         },
3758         {
3759                 .name = "DDI F IO",
3760                 .domains = ICL_DDI_IO_F_POWER_DOMAINS,
3761                 .ops = &hsw_power_well_ops,
3762                 .id = DISP_PW_ID_NONE,
3763                 {
3764                         .hsw.regs = &icl_ddi_power_well_regs,
3765                         .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
3766                 },
3767         },
3768         {
3769                 .name = "AUX A",
3770                 .domains = ICL_AUX_A_IO_POWER_DOMAINS,
3771                 .ops = &icl_aux_power_well_ops,
3772                 .id = DISP_PW_ID_NONE,
3773                 {
3774                         .hsw.regs = &icl_aux_power_well_regs,
3775                         .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3776                 },
3777         },
3778         {
3779                 .name = "AUX B",
3780                 .domains = ICL_AUX_B_IO_POWER_DOMAINS,
3781                 .ops = &icl_aux_power_well_ops,
3782                 .id = DISP_PW_ID_NONE,
3783                 {
3784                         .hsw.regs = &icl_aux_power_well_regs,
3785                         .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3786                 },
3787         },
3788         {
3789                 .name = "AUX C TC1",
3790                 .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
3791                 .ops = &icl_aux_power_well_ops,
3792                 .id = DISP_PW_ID_NONE,
3793                 {
3794                         .hsw.regs = &icl_aux_power_well_regs,
3795                         .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3796                         .hsw.is_tc_tbt = false,
3797                 },
3798         },
3799         {
3800                 .name = "AUX D TC2",
3801                 .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
3802                 .ops = &icl_aux_power_well_ops,
3803                 .id = DISP_PW_ID_NONE,
3804                 {
3805                         .hsw.regs = &icl_aux_power_well_regs,
3806                         .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
3807                         .hsw.is_tc_tbt = false,
3808                 },
3809         },
3810         {
3811                 .name = "AUX E TC3",
3812                 .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
3813                 .ops = &icl_aux_power_well_ops,
3814                 .id = DISP_PW_ID_NONE,
3815                 {
3816                         .hsw.regs = &icl_aux_power_well_regs,
3817                         .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
3818                         .hsw.is_tc_tbt = false,
3819                 },
3820         },
3821         {
3822                 .name = "AUX F TC4",
3823                 .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
3824                 .ops = &icl_aux_power_well_ops,
3825                 .id = DISP_PW_ID_NONE,
3826                 {
3827                         .hsw.regs = &icl_aux_power_well_regs,
3828                         .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
3829                         .hsw.is_tc_tbt = false,
3830                 },
3831         },
3832         {
3833                 .name = "AUX C TBT1",
3834                 .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
3835                 .ops = &icl_aux_power_well_ops,
3836                 .id = DISP_PW_ID_NONE,
3837                 {
3838                         .hsw.regs = &icl_aux_power_well_regs,
3839                         .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
3840                         .hsw.is_tc_tbt = true,
3841                 },
3842         },
3843         {
3844                 .name = "AUX D TBT2",
3845                 .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
3846                 .ops = &icl_aux_power_well_ops,
3847                 .id = DISP_PW_ID_NONE,
3848                 {
3849                         .hsw.regs = &icl_aux_power_well_regs,
3850                         .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
3851                         .hsw.is_tc_tbt = true,
3852                 },
3853         },
3854         {
3855                 .name = "AUX E TBT3",
3856                 .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
3857                 .ops = &icl_aux_power_well_ops,
3858                 .id = DISP_PW_ID_NONE,
3859                 {
3860                         .hsw.regs = &icl_aux_power_well_regs,
3861                         .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
3862                         .hsw.is_tc_tbt = true,
3863                 },
3864         },
3865         {
3866                 .name = "AUX F TBT4",
3867                 .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
3868                 .ops = &icl_aux_power_well_ops,
3869                 .id = DISP_PW_ID_NONE,
3870                 {
3871                         .hsw.regs = &icl_aux_power_well_regs,
3872                         .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
3873                         .hsw.is_tc_tbt = true,
3874                 },
3875         },
3876         {
3877                 .name = "power well 4",
3878                 .domains = ICL_PW_4_POWER_DOMAINS,
3879                 .ops = &hsw_power_well_ops,
3880                 .id = DISP_PW_ID_NONE,
3881                 {
3882                         .hsw.regs = &hsw_power_well_regs,
3883                         .hsw.idx = ICL_PW_CTL_IDX_PW_4,
3884                         .hsw.has_fuses = true,
3885                         .hsw.irq_pipe_mask = BIT(PIPE_C),
3886                 },
3887         },
3888 };
3889
/*
 * Ask PCODE to block (or unblock) TC-cold power state entry.
 *
 * @block: true to request that PCODE keeps the TC subsystem out of the
 *	   cold power state, false to allow it to enter TC-cold again.
 *
 * The request is retried up to 3 times; a failed blocking request (PCODE
 * reports EXIT_FAILED in the low data word) is treated as -EIO and retried
 * as well. Only the final outcome is logged.
 */
static void
tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
{
	u8 tries = 0;
	int ret;

	while (1) {
		u32 low_val = 0, high_val;

		/* High data word selects the block vs. unblock request. */
		if (block)
			high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_BLOCK_REQ;
		else
			high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ;

		/*
		 * Spec states that we should timeout the request after 200us
		 * but the function below will timeout after 500us
		 */
		ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val,
					     &high_val);
		if (ret == 0) {
			/*
			 * A successful mailbox exchange can still mean the
			 * block request itself failed; retry in that case.
			 */
			if (block &&
			    (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
				ret = -EIO;
			else
				break;
		}

		if (++tries == 3)
			break;

		/* PCODE busy: give it a moment before retrying. */
		if (ret == -EAGAIN)
			msleep(1);
	}

	if (ret)
		drm_err(&i915->drm, "TC cold %sblock failed\n",
			block ? "" : "un");
	else
		drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
			    block ? "" : "un");
}
3932
/* "Enabling" the virtual TC-cold-off well blocks TC-cold entry via PCODE. */
static void
tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
				  struct i915_power_well *power_well)
{
	tgl_tc_cold_request(i915, true);
}
3939
/* "Disabling" the virtual TC-cold-off well allows TC-cold entry again. */
static void
tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
				   struct i915_power_well *power_well)
{
	tgl_tc_cold_request(i915, false);
}
3946
3947 static void
3948 tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
3949                                    struct i915_power_well *power_well)
3950 {
3951         if (power_well->count > 0)
3952                 tgl_tc_cold_off_power_well_enable(i915, power_well);
3953         else
3954                 tgl_tc_cold_off_power_well_disable(i915, power_well);
3955 }
3956
3957 static bool
3958 tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
3959                                       struct i915_power_well *power_well)
3960 {
3961         /*
3962          * Not the correctly implementation but there is no way to just read it
3963          * from PCODE, so returning count to avoid state mismatch errors
3964          */
3965         return power_well->count;
3966 }
3967
/*
 * Ops for the virtual TC-cold-off power well: backed by a PCODE request
 * (see tgl_tc_cold_request()) rather than a PWR_WELL_CTL register.
 */
static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
	.sync_hw = tgl_tc_cold_off_power_well_sync_hw,
	.enable = tgl_tc_cold_off_power_well_enable,
	.disable = tgl_tc_cold_off_power_well_disable,
	.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
};
3974
3975 static const struct i915_power_well_desc tgl_power_wells[] = {
3976         {
3977                 .name = "always-on",
3978                 .always_on = true,
3979                 .domains = POWER_DOMAIN_MASK,
3980                 .ops = &i9xx_always_on_power_well_ops,
3981                 .id = DISP_PW_ID_NONE,
3982         },
3983         {
3984                 .name = "power well 1",
3985                 /* Handled by the DMC firmware */
3986                 .always_on = true,
3987                 .domains = 0,
3988                 .ops = &hsw_power_well_ops,
3989                 .id = SKL_DISP_PW_1,
3990                 {
3991                         .hsw.regs = &hsw_power_well_regs,
3992                         .hsw.idx = ICL_PW_CTL_IDX_PW_1,
3993                         .hsw.has_fuses = true,
3994                 },
3995         },
3996         {
3997                 .name = "DC off",
3998                 .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
3999                 .ops = &gen9_dc_off_power_well_ops,
4000                 .id = SKL_DISP_DC_OFF,
4001         },
4002         {
4003                 .name = "power well 2",
4004                 .domains = TGL_PW_2_POWER_DOMAINS,
4005                 .ops = &hsw_power_well_ops,
4006                 .id = SKL_DISP_PW_2,
4007                 {
4008                         .hsw.regs = &hsw_power_well_regs,
4009                         .hsw.idx = ICL_PW_CTL_IDX_PW_2,
4010                         .hsw.has_fuses = true,
4011                 },
4012         },
4013         {
4014                 .name = "power well 3",
4015                 .domains = TGL_PW_3_POWER_DOMAINS,
4016                 .ops = &hsw_power_well_ops,
4017                 .id = ICL_DISP_PW_3,
4018                 {
4019                         .hsw.regs = &hsw_power_well_regs,
4020                         .hsw.idx = ICL_PW_CTL_IDX_PW_3,
4021                         .hsw.irq_pipe_mask = BIT(PIPE_B),
4022                         .hsw.has_vga = true,
4023                         .hsw.has_fuses = true,
4024                 },
4025         },
4026         {
4027                 .name = "DDI A IO",
4028                 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
4029                 .ops = &hsw_power_well_ops,
4030                 .id = DISP_PW_ID_NONE,
4031                 {
4032                         .hsw.regs = &icl_ddi_power_well_regs,
4033                         .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4034                 }
4035         },
4036         {
4037                 .name = "DDI B IO",
4038                 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
4039                 .ops = &hsw_power_well_ops,
4040                 .id = DISP_PW_ID_NONE,
4041                 {
4042                         .hsw.regs = &icl_ddi_power_well_regs,
4043                         .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4044                 }
4045         },
4046         {
4047                 .name = "DDI C IO",
4048                 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
4049                 .ops = &hsw_power_well_ops,
4050                 .id = DISP_PW_ID_NONE,
4051                 {
4052                         .hsw.regs = &icl_ddi_power_well_regs,
4053                         .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
4054                 }
4055         },
4056         {
4057                 .name = "DDI D TC1 IO",
4058                 .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
4059                 .ops = &hsw_power_well_ops,
4060                 .id = DISP_PW_ID_NONE,
4061                 {
4062                         .hsw.regs = &icl_ddi_power_well_regs,
4063                         .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4064                 },
4065         },
4066         {
4067                 .name = "DDI E TC2 IO",
4068                 .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
4069                 .ops = &hsw_power_well_ops,
4070                 .id = DISP_PW_ID_NONE,
4071                 {
4072                         .hsw.regs = &icl_ddi_power_well_regs,
4073                         .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4074                 },
4075         },
4076         {
4077                 .name = "DDI F TC3 IO",
4078                 .domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS,
4079                 .ops = &hsw_power_well_ops,
4080                 .id = DISP_PW_ID_NONE,
4081                 {
4082                         .hsw.regs = &icl_ddi_power_well_regs,
4083                         .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
4084                 },
4085         },
4086         {
4087                 .name = "DDI G TC4 IO",
4088                 .domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS,
4089                 .ops = &hsw_power_well_ops,
4090                 .id = DISP_PW_ID_NONE,
4091                 {
4092                         .hsw.regs = &icl_ddi_power_well_regs,
4093                         .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
4094                 },
4095         },
4096         {
4097                 .name = "DDI H TC5 IO",
4098                 .domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS,
4099                 .ops = &hsw_power_well_ops,
4100                 .id = DISP_PW_ID_NONE,
4101                 {
4102                         .hsw.regs = &icl_ddi_power_well_regs,
4103                         .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
4104                 },
4105         },
4106         {
4107                 .name = "DDI I TC6 IO",
4108                 .domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS,
4109                 .ops = &hsw_power_well_ops,
4110                 .id = DISP_PW_ID_NONE,
4111                 {
4112                         .hsw.regs = &icl_ddi_power_well_regs,
4113                         .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
4114                 },
4115         },
4116         {
4117                 .name = "AUX A",
4118                 .domains = TGL_AUX_A_IO_POWER_DOMAINS,
4119                 .ops = &icl_aux_power_well_ops,
4120                 .id = DISP_PW_ID_NONE,
4121                 {
4122                         .hsw.regs = &icl_aux_power_well_regs,
4123                         .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4124                 },
4125         },
4126         {
4127                 .name = "AUX B",
4128                 .domains = TGL_AUX_B_IO_POWER_DOMAINS,
4129                 .ops = &icl_aux_power_well_ops,
4130                 .id = DISP_PW_ID_NONE,
4131                 {
4132                         .hsw.regs = &icl_aux_power_well_regs,
4133                         .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4134                 },
4135         },
4136         {
4137                 .name = "AUX C",
4138                 .domains = TGL_AUX_C_IO_POWER_DOMAINS,
4139                 .ops = &icl_aux_power_well_ops,
4140                 .id = DISP_PW_ID_NONE,
4141                 {
4142                         .hsw.regs = &icl_aux_power_well_regs,
4143                         .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
4144                 },
4145         },
4146         {
4147                 .name = "AUX D TC1",
4148                 .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
4149                 .ops = &icl_aux_power_well_ops,
4150                 .id = DISP_PW_ID_NONE,
4151                 {
4152                         .hsw.regs = &icl_aux_power_well_regs,
4153                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4154                         .hsw.is_tc_tbt = false,
4155                 },
4156         },
4157         {
4158                 .name = "AUX E TC2",
4159                 .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
4160                 .ops = &icl_aux_power_well_ops,
4161                 .id = DISP_PW_ID_NONE,
4162                 {
4163                         .hsw.regs = &icl_aux_power_well_regs,
4164                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4165                         .hsw.is_tc_tbt = false,
4166                 },
4167         },
4168         {
4169                 .name = "AUX F TC3",
4170                 .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
4171                 .ops = &icl_aux_power_well_ops,
4172                 .id = DISP_PW_ID_NONE,
4173                 {
4174                         .hsw.regs = &icl_aux_power_well_regs,
4175                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
4176                         .hsw.is_tc_tbt = false,
4177                 },
4178         },
4179         {
4180                 .name = "AUX G TC4",
4181                 .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
4182                 .ops = &icl_aux_power_well_ops,
4183                 .id = DISP_PW_ID_NONE,
4184                 {
4185                         .hsw.regs = &icl_aux_power_well_regs,
4186                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
4187                         .hsw.is_tc_tbt = false,
4188                 },
4189         },
4190         {
4191                 .name = "AUX H TC5",
4192                 .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
4193                 .ops = &icl_aux_power_well_ops,
4194                 .id = DISP_PW_ID_NONE,
4195                 {
4196                         .hsw.regs = &icl_aux_power_well_regs,
4197                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
4198                         .hsw.is_tc_tbt = false,
4199                 },
4200         },
4201         {
4202                 .name = "AUX I TC6",
4203                 .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
4204                 .ops = &icl_aux_power_well_ops,
4205                 .id = DISP_PW_ID_NONE,
4206                 {
4207                         .hsw.regs = &icl_aux_power_well_regs,
4208                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
4209                         .hsw.is_tc_tbt = false,
4210                 },
4211         },
4212         {
4213                 .name = "AUX D TBT1",
4214                 .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
4215                 .ops = &icl_aux_power_well_ops,
4216                 .id = DISP_PW_ID_NONE,
4217                 {
4218                         .hsw.regs = &icl_aux_power_well_regs,
4219                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
4220                         .hsw.is_tc_tbt = true,
4221                 },
4222         },
4223         {
4224                 .name = "AUX E TBT2",
4225                 .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
4226                 .ops = &icl_aux_power_well_ops,
4227                 .id = DISP_PW_ID_NONE,
4228                 {
4229                         .hsw.regs = &icl_aux_power_well_regs,
4230                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
4231                         .hsw.is_tc_tbt = true,
4232                 },
4233         },
4234         {
4235                 .name = "AUX F TBT3",
4236                 .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
4237                 .ops = &icl_aux_power_well_ops,
4238                 .id = DISP_PW_ID_NONE,
4239                 {
4240                         .hsw.regs = &icl_aux_power_well_regs,
4241                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
4242                         .hsw.is_tc_tbt = true,
4243                 },
4244         },
4245         {
4246                 .name = "AUX G TBT4",
4247                 .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
4248                 .ops = &icl_aux_power_well_ops,
4249                 .id = DISP_PW_ID_NONE,
4250                 {
4251                         .hsw.regs = &icl_aux_power_well_regs,
4252                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
4253                         .hsw.is_tc_tbt = true,
4254                 },
4255         },
4256         {
4257                 .name = "AUX H TBT5",
4258                 .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
4259                 .ops = &icl_aux_power_well_ops,
4260                 .id = DISP_PW_ID_NONE,
4261                 {
4262                         .hsw.regs = &icl_aux_power_well_regs,
4263                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
4264                         .hsw.is_tc_tbt = true,
4265                 },
4266         },
4267         {
4268                 .name = "AUX I TBT6",
4269                 .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
4270                 .ops = &icl_aux_power_well_ops,
4271                 .id = DISP_PW_ID_NONE,
4272                 {
4273                         .hsw.regs = &icl_aux_power_well_regs,
4274                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
4275                         .hsw.is_tc_tbt = true,
4276                 },
4277         },
4278         {
4279                 .name = "power well 4",
4280                 .domains = TGL_PW_4_POWER_DOMAINS,
4281                 .ops = &hsw_power_well_ops,
4282                 .id = DISP_PW_ID_NONE,
4283                 {
4284                         .hsw.regs = &hsw_power_well_regs,
4285                         .hsw.idx = ICL_PW_CTL_IDX_PW_4,
4286                         .hsw.has_fuses = true,
4287                         .hsw.irq_pipe_mask = BIT(PIPE_C),
4288                 }
4289         },
4290         {
4291                 .name = "power well 5",
4292                 .domains = TGL_PW_5_POWER_DOMAINS,
4293                 .ops = &hsw_power_well_ops,
4294                 .id = DISP_PW_ID_NONE,
4295                 {
4296                         .hsw.regs = &hsw_power_well_regs,
4297                         .hsw.idx = TGL_PW_CTL_IDX_PW_5,
4298                         .hsw.has_fuses = true,
4299                         .hsw.irq_pipe_mask = BIT(PIPE_D),
4300                 },
4301         },
4302         {
4303                 .name = "TC cold off",
4304                 .domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
4305                 .ops = &tgl_tc_cold_off_ops,
4306                 .id = DISP_PW_ID_NONE,
4307         },
4308 };
4309
/*
 * Normalize the disable_power_well module parameter: negative values mean
 * "use the platform default" (enabled, i.e. 1); non-negative values are
 * clamped to 0/1.
 */
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	return disable_power_well < 0 ? 1 : !!disable_power_well;
}
4319
/*
 * Compute the mask of DC (display power saving) states the platform and the
 * enable_dc module parameter allow.
 *
 * @enable_dc: requested max DC state from the module parameter; -1 means
 *	       "platform default", out-of-range values are clamped with a
 *	       warning/error message.
 *
 * Returns a DC_STATE_EN_* bitmask.
 */
static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
			       int enable_dc)
{
	u32 mask;
	int requested_dc;
	int max_dc;

	/* Per-platform upper limit and the always-allowed baseline states. */
	if (INTEL_GEN(dev_priv) >= 12) {
		max_dc = 4;
		/*
		 * DC9 has a separate HW flow from the rest of the DC states,
		 * not depending on the DMC firmware. It's needed by system
		 * suspend/resume, so allow it unconditionally.
		 */
		mask = DC_STATE_EN_DC9;
	} else if (IS_GEN(dev_priv, 11)) {
		max_dc = 2;
		mask = DC_STATE_EN_DC9;
	} else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
		max_dc = 2;
		mask = 0;
	} else if (IS_GEN9_LP(dev_priv)) {
		max_dc = 1;
		mask = DC_STATE_EN_DC9;
	} else {
		max_dc = 0;
		mask = 0;
	}

	/* DC states depend on the power wells being toggled at runtime. */
	if (!i915_modparams.disable_power_well)
		max_dc = 0;

	/* Sanitize the requested state against the platform limit. */
	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "Adjusting requested max DC state (%d->%d)\n",
			    enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		drm_err(&dev_priv->drm,
			"Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	/* Map the sanitized max state to the corresponding enable bits. */
	switch (requested_dc) {
	case 4:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
		break;
	case 3:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
		break;
	case 2:
		mask |= DC_STATE_EN_UPTO_DC6;
		break;
	case 1:
		mask |= DC_STATE_EN_UPTO_DC5;
		break;
	}

	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);

	return mask;
}
4386
4387 static int
4388 __set_power_wells(struct i915_power_domains *power_domains,
4389                   const struct i915_power_well_desc *power_well_descs,
4390                   int power_well_count)
4391 {
4392         u64 power_well_ids = 0;
4393         int i;
4394
4395         power_domains->power_well_count = power_well_count;
4396         power_domains->power_wells =
4397                                 kcalloc(power_well_count,
4398                                         sizeof(*power_domains->power_wells),
4399                                         GFP_KERNEL);
4400         if (!power_domains->power_wells)
4401                 return -ENOMEM;
4402
4403         for (i = 0; i < power_well_count; i++) {
4404                 enum i915_power_well_id id = power_well_descs[i].id;
4405
4406                 power_domains->power_wells[i].desc = &power_well_descs[i];
4407
4408                 if (id == DISP_PW_ID_NONE)
4409                         continue;
4410
4411                 WARN_ON(id >= sizeof(power_well_ids) * 8);
4412                 WARN_ON(power_well_ids & BIT_ULL(id));
4413                 power_well_ids |= BIT_ULL(id);
4414         }
4415
4416         return 0;
4417 }
4418
/* Convenience wrapper deriving the element count from the array itself. */
#define set_power_wells(power_domains, __power_well_descs) \
	__set_power_wells(power_domains, __power_well_descs, \
			  ARRAY_SIZE(__power_well_descs))
4422
4423 /**
4424  * intel_power_domains_init - initializes the power domain structures
4425  * @dev_priv: i915 device instance
4426  *
4427  * Initializes the power domain structures for @dev_priv depending upon the
4428  * supported platform.
4429  */
4430 int intel_power_domains_init(struct drm_i915_private *dev_priv)
4431 {
4432         struct i915_power_domains *power_domains = &dev_priv->power_domains;
4433         int err;
4434
4435         i915_modparams.disable_power_well =
4436                 sanitize_disable_power_well_option(dev_priv,
4437                                                    i915_modparams.disable_power_well);
4438         dev_priv->csr.allowed_dc_mask =
4439                 get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
4440
4441         dev_priv->csr.target_dc_state =
4442                 sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
4443
4444         BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
4445
4446         mutex_init(&power_domains->lock);
4447
4448         INIT_DELAYED_WORK(&power_domains->async_put_work,
4449                           intel_display_power_put_async_work);
4450
4451         /*
4452          * The enabling order will be from lower to higher indexed wells,
4453          * the disabling order is reversed.
4454          */
4455         if (IS_GEN(dev_priv, 12)) {
4456                 err = set_power_wells(power_domains, tgl_power_wells);
4457         } else if (IS_GEN(dev_priv, 11)) {
4458                 err = set_power_wells(power_domains, icl_power_wells);
4459         } else if (IS_CANNONLAKE(dev_priv)) {
4460                 err = set_power_wells(power_domains, cnl_power_wells);
4461
4462                 /*
4463                  * DDI and Aux IO are getting enabled for all ports
4464                  * regardless the presence or use. So, in order to avoid
4465                  * timeouts, lets remove them from the list
4466                  * for the SKUs without port F.
4467                  */
4468                 if (!IS_CNL_WITH_PORT_F(dev_priv))
4469                         power_domains->power_well_count -= 2;
4470         } else if (IS_GEMINILAKE(dev_priv)) {
4471                 err = set_power_wells(power_domains, glk_power_wells);
4472         } else if (IS_BROXTON(dev_priv)) {
4473                 err = set_power_wells(power_domains, bxt_power_wells);
4474         } else if (IS_GEN9_BC(dev_priv)) {
4475                 err = set_power_wells(power_domains, skl_power_wells);
4476         } else if (IS_CHERRYVIEW(dev_priv)) {
4477                 err = set_power_wells(power_domains, chv_power_wells);
4478         } else if (IS_BROADWELL(dev_priv)) {
4479                 err = set_power_wells(power_domains, bdw_power_wells);
4480         } else if (IS_HASWELL(dev_priv)) {
4481                 err = set_power_wells(power_domains, hsw_power_wells);
4482         } else if (IS_VALLEYVIEW(dev_priv)) {
4483                 err = set_power_wells(power_domains, vlv_power_wells);
4484         } else if (IS_I830(dev_priv)) {
4485                 err = set_power_wells(power_domains, i830_power_wells);
4486         } else {
4487                 err = set_power_wells(power_domains, i9xx_always_on_power_well);
4488         }
4489
4490         return err;
4491 }
4492
4493 /**
4494  * intel_power_domains_cleanup - clean up power domains resources
4495  * @dev_priv: i915 device instance
4496  *
4497  * Release any resources acquired by intel_power_domains_init()
4498  */
4499 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
4500 {
4501         kfree(dev_priv->power_domains.power_wells);
4502 }
4503
4504 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
4505 {
4506         struct i915_power_domains *power_domains = &dev_priv->power_domains;
4507         struct i915_power_well *power_well;
4508
4509         mutex_lock(&power_domains->lock);
4510         for_each_power_well(dev_priv, power_well) {
4511                 power_well->desc->ops->sync_hw(dev_priv, power_well);
4512                 power_well->hw_enabled =
4513                         power_well->desc->ops->is_enabled(dev_priv, power_well);
4514         }
4515         mutex_unlock(&power_domains->lock);
4516 }
4517
4518 static inline
4519 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
4520                           i915_reg_t reg, bool enable)
4521 {
4522         u32 val, status;
4523
4524         val = intel_de_read(dev_priv, reg);
4525         val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
4526         intel_de_write(dev_priv, reg, val);
4527         intel_de_posting_read(dev_priv, reg);
4528         udelay(10);
4529
4530         status = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
4531         if ((enable && !status) || (!enable && status)) {
4532                 drm_err(&dev_priv->drm, "DBus power %s timeout!\n",
4533                         enable ? "enable" : "disable");
4534                 return false;
4535         }
4536         return true;
4537 }
4538
/* Enable DBUF by requesting only slice S1. */
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1));
}
4543
/* Disable DBUF by requesting an empty slice mask. */
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	icl_dbuf_slices_update(dev_priv, 0);
}
4548
/*
 * Program the set of powered DBUF slices to exactly @req_slices (one bit
 * per slice) and update the software tracking mask to match. Serialized
 * against DC state transitions via the power domains lock (see below).
 */
void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
			    u8 req_slices)
{
	int i;
	int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices,
		 "Invalid number of dbuf slices requested\n");

	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
		    req_slices);

	/*
	 * Might be running this in parallel to gen9_dc_off_power_well_enable
	 * being called from intel_dp_detect for instance,
	 * which causes assertion triggered by race condition,
	 * as gen9_assert_dbuf_enabled might preempt this when registers
	 * were already updated, while dev_priv was not.
	 */
	mutex_lock(&power_domains->lock);

	/* Request/release each slice according to its bit in req_slices. */
	for (i = 0; i < max_slices; i++) {
		intel_dbuf_slice_set(dev_priv,
				     DBUF_CTL_S(i),
				     (req_slices & BIT(i)) != 0);
	}

	/* Keep the software state in sync with what was just programmed. */
	dev_priv->enabled_dbuf_slices_mask = req_slices;

	mutex_unlock(&power_domains->lock);
}
4581
static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
{
	/*
	 * NOTE(review): presumably refreshes enabled_dbuf_slices_mask from
	 * the hardware before we OR into it below — confirm against
	 * skl_ddb_get_hw_state().
	 */
	skl_ddb_get_hw_state(dev_priv);
	/*
	 * Just power up at least 1 slice, we will
	 * figure out later which slices we have and what we need.
	 */
	icl_dbuf_slices_update(dev_priv, dev_priv->enabled_dbuf_slices_mask |
			       BIT(DBUF_S1));
}
4592
/* Power down all DBUF slices. */
static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
{
	icl_dbuf_slices_update(dev_priv, 0);
}
4597
4598 static void icl_mbus_init(struct drm_i915_private *dev_priv)
4599 {
4600         u32 mask, val;
4601
4602         mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
4603                 MBUS_ABOX_BT_CREDIT_POOL2_MASK |
4604                 MBUS_ABOX_B_CREDIT_MASK |
4605                 MBUS_ABOX_BW_CREDIT_MASK;
4606         val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
4607                 MBUS_ABOX_BT_CREDIT_POOL2(16) |
4608                 MBUS_ABOX_B_CREDIT(1) |
4609                 MBUS_ABOX_BW_CREDIT(1);
4610
4611         intel_de_rmw(dev_priv, MBUS_ABOX_CTL, mask, val);
4612         if (INTEL_GEN(dev_priv) >= 12) {
4613                 intel_de_rmw(dev_priv, MBUS_ABOX1_CTL, mask, val);
4614                 intel_de_rmw(dev_priv, MBUS_ABOX2_CTL, mask, val);
4615         }
4616 }
4617
/*
 * Sanity-check the BIOS-programmed LCPLL/CDCLK state: log errors for any
 * misconfiguration, but never attempt to correct it here.
 */
static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
{
	u32 val = intel_de_read(dev_priv, LCPLL_CTL);

	/*
	 * The LCPLL register should be turned on by the BIOS. For now
	 * let's just check its state and print errors in case
	 * something is wrong.  Don't even try to turn it on.
	 */

	if (val & LCPLL_CD_SOURCE_FCLK)
		drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");

	if (val & LCPLL_PLL_DISABLE)
		drm_err(&dev_priv->drm, "LCPLL is disabled\n");

	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
		drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
}
4637
/*
 * Warn about anything that must already be shut down before LCPLL may be
 * disabled: active CRTCs, the display power well, the SPLL/WRPLLs, panel
 * power, all PWM backlights, the utility pin, PCH GTC and IRQs.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
			"SPLL enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
			"WRPLL1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
			"WRPLL2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
			"Panel power on\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	/* PWM2 only exists on Haswell. */
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
			"PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
4677
4678 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
4679 {
4680         if (IS_HASWELL(dev_priv))
4681                 return intel_de_read(dev_priv, D_COMP_HSW);
4682         else
4683                 return intel_de_read(dev_priv, D_COMP_BDW);
4684 }
4685
4686 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
4687 {
4688         if (IS_HASWELL(dev_priv)) {
4689                 if (sandybridge_pcode_write(dev_priv,
4690                                             GEN6_PCODE_WRITE_D_COMP, val))
4691                         drm_dbg_kms(&dev_priv->drm,
4692                                     "Failed to write to D_COMP\n");
4693         } else {
4694                 intel_de_write(dev_priv, D_COMP_BDW, val);
4695                 intel_de_posting_read(dev_priv, D_COMP_BDW);
4696         }
4697 }
4698
4699 /*
4700  * This function implements pieces of two sequences from BSpec:
4701  * - Sequence for display software to disable LCPLL
4702  * - Sequence for display software to allow package C8+
4703  * The steps implemented here are just the steps that actually touch the LCPLL
4704  * register. Callers should take care of disabling all the display engine
4705  * functions, doing the mode unset, fixing interrupts, etc.
4706  */
/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 *
 * @switch_to_fclk: first switch the CD clock source to FCLK
 * @allow_power_down: finally set LCPLL_POWER_DOWN_ALLOW
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	u32 val;

	assert_can_disable_lcpll(dev_priv);

	val = intel_de_read(dev_priv, LCPLL_CTL);

	/* Optionally move the CD clock off LCPLL and onto FCLK first. */
	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		intel_de_write(dev_priv, LCPLL_CTL, val);

		if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			drm_err(&dev_priv->drm, "Switching to FCLK failed\n");

		val = intel_de_read(dev_priv, LCPLL_CTL);
	}

	/* Disable the PLL and wait for the lock bit to clear. */
	val |= LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);
	intel_de_posting_read(dev_priv, LCPLL_CTL);

	if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "LCPLL still locked\n");

	/* Disable D_COMP and wait for any RCOMP in progress to finish. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) &
		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
		drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");

	/* Optionally allow the PLL power to be removed entirely. */
	if (allow_power_down) {
		val = intel_de_read(dev_priv, LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		intel_de_write(dev_priv, LCPLL_CTL, val);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}
}
4750
4751 /*
4752  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
4753  * source.
4754  */
/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LCPLL_CTL);

	/* Nothing to do if LCPLL is already locked and fully configured. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	/* Disallow power-down first, so the PLL can actually come back up. */
	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		intel_de_write(dev_priv, LCPLL_CTL, val);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}

	/* Re-enable D_COMP (force on, clear the disable bit). */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	/* Re-enable the PLL and wait for it to lock. */
	val = intel_de_read(dev_priv, LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);

	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "LCPLL not locked yet\n");

	/* Switch the CD clock source back from FCLK to LCPLL. */
	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = intel_de_read(dev_priv, LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		intel_de_write(dev_priv, LCPLL_CTL, val);

		if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			drm_err(&dev_priv->drm,
				"Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	/* CDCLK changed with the clock source; refresh the cached config. */
	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
}
4805
4806 /*
4807  * Package states C8 and deeper are really deep PC states that can only be
4808  * reached when all the devices on the system allow it, so even if the graphics
4809  * device allows PC8+, it doesn't mean the system will actually get to these
4810  * states. Our driver only allows PC8+ when going into runtime PM.
4811  *
4812  * The requirements for PC8+ are that all the outputs are disabled, the power
4813  * well is disabled and most interrupts are disabled, and these are also
4814  * requirements for runtime PM. When these conditions are met, we manually do
4815  * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
4816  * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
4817  * hang the machine.
4818  *
4819  * When we really reach PC8 or deeper states (not just when we allow it) we lose
4820  * the state of some registers, so when we come back from PC8+ we need to
4821  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
4822  * need to take care of the registers kept by RC6. Notice that this happens even
4823  * if we don't put the device in PCI D3 state (which is what currently happens
4824  * because of the runtime PM support).
4825  *
4826  * For more, read "Display Sequences for Package C8" on the hardware
4827  * documentation.
4828  */
/* Allow package C8+: clear PCH clock gating override, stop DP clkout, drop LCPLL. */
static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");

	/* On LPT-LP, clear the partition-level disable so the PCH may gate. */
	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev_priv);
	/* Switch CD clock to FCLK and allow LCPLL power down. */
	hsw_disable_lcpll(dev_priv, true, true);
}
4844
/* Disallow package C8+: restore LCPLL, PCH refclk and the LPT-LP gating override. */
static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	intel_init_pch_refclk(dev_priv);

	/* Re-set the bit cleared in hsw_enable_pc8() on LPT-LP. */
	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
	}
}
4860
4861 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
4862                                       bool enable)
4863 {
4864         i915_reg_t reg;
4865         u32 reset_bits, val;
4866
4867         if (IS_IVYBRIDGE(dev_priv)) {
4868                 reg = GEN7_MSG_CTL;
4869                 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
4870         } else {
4871                 reg = HSW_NDE_RSTWRN_OPT;
4872                 reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
4873         }
4874
4875         val = intel_de_read(dev_priv, reg);
4876
4877         if (enable)
4878                 val |= reset_bits;
4879         else
4880                 val &= ~reset_bits;
4881
4882         intel_de_write(dev_priv, reg, val);
4883 }
4884
/*
 * SKL display HW init: disable DC states, enable the PCH reset handshake,
 * power up PG1 + Misc I/O, then bring up CDCLK and DBUF. On resume the DMC
 * firmware is reloaded if a payload is available.
 */
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
4914
/*
 * SKL display HW teardown: reverse of skl_display_core_init(), except that
 * the Misc I/O well is deliberately left enabled per BSpec (see below).
 */
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_disable_dc_states(dev_priv);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}
4944
/*
 * BXT display HW init: like SKL but the PCH reset handshake must stay
 * disabled (no PCH on BXT) and there is no Misc I/O well to enable.
 */
static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	intel_pch_reset_handshake(dev_priv, false);

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
4975
/* BXT display HW teardown: reverse of bxt_display_core_init(). */
static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_disable_dc_states(dev_priv);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */
}
5003
/*
 * CNL display HW init; the numbered comments track the BSpec init sequence.
 */
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH Reset Handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* 2-3. */
	intel_combo_phy_init(dev_priv);

	/*
	 * 4. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Enable CD clock */
	intel_cdclk_init_hw(dev_priv);

	/* 6. Enable DBUF */
	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
5035
/* CNL display HW teardown, reversing cnl_display_core_init() step by step. */
static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_disable_dc_states(dev_priv);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit_hw(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30);		/* 10 us delay per Bspec */

	/* 5. */
	intel_combo_phy_uninit(dev_priv);
}
5066
/* One BW_BUDDY page-mask entry, keyed by DRAM type and channel count. */
struct buddy_page_mask {
	u32 page_mask;		/* value for the BW_BUDDY*_PAGE_MASK registers */
	u8 type;		/* enum intel_dram_type */
	u8 num_channels;	/* populated DRAM channels */
};

/* Default TGL page masks; the empty entry terminates the table. */
static const struct buddy_page_mask tgl_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0xE },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
	{}
};

/* Wa_1409767108 page masks, used on early TGL steppings instead. */
static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
	{}
};
5088
5089 static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
5090 {
5091         enum intel_dram_type type = dev_priv->dram_info.type;
5092         u8 num_channels = dev_priv->dram_info.num_channels;
5093         const struct buddy_page_mask *table;
5094         int i;
5095
5096         if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
5097                 /* Wa_1409767108: tgl */
5098                 table = wa_1409767108_buddy_page_masks;
5099         else
5100                 table = tgl_buddy_page_masks;
5101
5102         for (i = 0; table[i].page_mask != 0; i++)
5103                 if (table[i].num_channels == num_channels &&
5104                     table[i].type == type)
5105                         break;
5106
5107         if (table[i].page_mask == 0) {
5108                 drm_dbg(&dev_priv->drm,
5109                         "Unknown memory configuration; disabling address buddy logic.\n");
5110                 intel_de_write(dev_priv, BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
5111                 intel_de_write(dev_priv, BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
5112         } else {
5113                 intel_de_write(dev_priv, BW_BUDDY1_PAGE_MASK,
5114                                table[i].page_mask);
5115                 intel_de_write(dev_priv, BW_BUDDY2_PAGE_MASK,
5116                                table[i].page_mask);
5117
5118                 /* Wa_22010178259:tgl */
5119                 intel_de_rmw(dev_priv, BW_BUDDY1_CTL,
5120                              BW_BUDDY_TLB_REQ_TIMER_MASK,
5121                              REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
5122                 intel_de_rmw(dev_priv, BW_BUDDY2_CTL,
5123                              BW_BUDDY_TLB_REQ_TIMER_MASK,
5124                              REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
5125         }
5126 }
5127
/*
 * ICL+ display HW init; the numbered comments track the BSpec init sequence.
 * Gen12 additionally programs the arbiter BW_BUDDY registers.
 */
static void icl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* 2. Initialize all combo phys */
	intel_combo_phy_init(dev_priv);

	/*
	 * 3. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 4. Enable CDCLK. */
	intel_cdclk_init_hw(dev_priv);

	/* 5. Enable DBUF. */
	icl_dbuf_enable(dev_priv);

	/* 6. Setup MBUS. */
	icl_mbus_init(dev_priv);

	/* 7. Program arbiter BW_BUDDY registers */
	if (INTEL_GEN(dev_priv) >= 12)
		tgl_bw_buddy_init(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
5167
/* ICL+ display HW teardown, reversing icl_display_core_init() step by step. */
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_disable_dc_states(dev_priv);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	icl_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit_hw(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. */
	intel_combo_phy_uninit(dev_priv);
}
5196
/*
 * Reconstruct the shadow copy of DISPLAY_PHY_CONTROL from the current power
 * well and lane status, since the register itself must never be read (see
 * comment below). Actual programming is deferred to power well enabling.
 */
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
		    dev_priv->chv_phy_control);

	/* Defer application of initial phy_control to enabling the powerwell */
}
5283
/*
 * VLV common-lane workaround: toggle the PHY side reset by power-gating the
 * common lane well, unless the display looks already active. Note the
 * deliberately unbalanced enable(disp2d)/disable(cmn) pair — the cmn well
 * will be re-enabled on demand later.
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
		return;

	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->desc->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->desc->ops->disable(dev_priv, cmn);
}
5311
5312 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
5313 {
5314         bool ret;
5315
5316         vlv_punit_get(dev_priv);
5317         ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
5318         vlv_punit_put(dev_priv);
5319
5320         return ret;
5321 }
5322
5323 static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
5324 {
5325         drm_WARN(&dev_priv->drm,
5326                  !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
5327                  "VED not power gated\n");
5328 }
5329
5330 static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
5331 {
5332         static const struct pci_device_id isp_ids[] = {
5333                 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
5334                 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
5335                 {}
5336         };
5337
5338         drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
5339                  !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
5340                  "ISP not power gated\n");
5341 }
5342
5343 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
5344
/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @i915: i915 device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells in other
 * domains (and not in the INIT domain) are referenced or disabled by
 * intel_modeset_readout_hw_state(). After that the reference count of each
 * power well must match its HW enabled state, see
 * intel_power_domains_verify_state().
 *
 * It will return with power domains disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_driver_remove().
 */
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
        struct i915_power_domains *power_domains = &i915->power_domains;

        power_domains->initializing = true;

        /*
         * Platform-specific display core bring-up and workarounds. On
         * VLV/CHV only PHY state setup / the cmnlane workaround is needed
         * (under the power domains lock), plus sanity asserts that the
         * media/imaging blocks are power gated.
         */
        if (INTEL_GEN(i915) >= 11) {
                icl_display_core_init(i915, resume);
        } else if (IS_CANNONLAKE(i915)) {
                cnl_display_core_init(i915, resume);
        } else if (IS_GEN9_BC(i915)) {
                skl_display_core_init(i915, resume);
        } else if (IS_GEN9_LP(i915)) {
                bxt_display_core_init(i915, resume);
        } else if (IS_CHERRYVIEW(i915)) {
                mutex_lock(&power_domains->lock);
                chv_phy_control_init(i915);
                mutex_unlock(&power_domains->lock);
                assert_isp_power_gated(i915);
        } else if (IS_VALLEYVIEW(i915)) {
                mutex_lock(&power_domains->lock);
                vlv_cmnlane_wa(i915);
                mutex_unlock(&power_domains->lock);
                assert_ved_power_gated(i915);
                assert_isp_power_gated(i915);
        } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
                hsw_assert_cdclk(i915);
                intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
        } else if (IS_IVYBRIDGE(i915)) {
                intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
        }

        /*
         * Keep all power wells enabled for any dependent HW access during
         * initialization and to make sure we keep BIOS enabled display HW
         * resources powered until display HW readout is complete. We drop
         * this reference in intel_power_domains_enable().
         */
        power_domains->wakeref =
                intel_display_power_get(i915, POWER_DOMAIN_INIT);

        /* Disable power support if the user asked so. */
        /*
         * NOTE(review): this extra reference is never dropped at runtime
         * (only in driver_remove/suspend), which keeps the wells always on
         * when the modparam disables power well toggling.
         */
        if (!i915_modparams.disable_power_well)
                intel_display_power_get(i915, POWER_DOMAIN_INIT);
        intel_power_domains_sync_hw(i915);

        power_domains->initializing = false;
}
5409
/**
 * intel_power_domains_driver_remove - deinitialize hw power domain state
 * @i915: i915 device instance
 *
 * De-initializes the display power domain HW state. It also ensures that the
 * device stays powered up so that the driver can be reloaded.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and must be paired with
 * intel_power_domains_init_hw().
 */
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
        /* Take over the INIT wakeref stashed by intel_power_domains_init_hw(). */
        intel_wakeref_t wakeref __maybe_unused =
                fetch_and_zero(&i915->power_domains.wakeref);

        /* Remove the refcount we took to keep power well support disabled. */
        if (!i915_modparams.disable_power_well)
                intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);

        /* Settle any pending async power domain work before verifying. */
        intel_display_power_flush_work_sync(i915);

        intel_power_domains_verify_state(i915);

        /* Keep the power well enabled, but cancel its rpm wakeref. */
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
5437
5438 /**
5439  * intel_power_domains_enable - enable toggling of display power wells
5440  * @i915: i915 device instance
5441  *
5442  * Enable the ondemand enabling/disabling of the display power wells. Note that
5443  * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
5444  * only at specific points of the display modeset sequence, thus they are not
5445  * affected by the intel_power_domains_enable()/disable() calls. The purpose
5446  * of these function is to keep the rest of power wells enabled until the end
5447  * of display HW readout (which will acquire the power references reflecting
5448  * the current HW state).
5449  */
5450 void intel_power_domains_enable(struct drm_i915_private *i915)
5451 {
5452         intel_wakeref_t wakeref __maybe_unused =
5453                 fetch_and_zero(&i915->power_domains.wakeref);
5454
5455         intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5456         intel_power_domains_verify_state(i915);
5457 }
5458
5459 /**
5460  * intel_power_domains_disable - disable toggling of display power wells
5461  * @i915: i915 device instance
5462  *
5463  * Disable the ondemand enabling/disabling of the display power wells. See
5464  * intel_power_domains_enable() for which power wells this call controls.
5465  */
5466 void intel_power_domains_disable(struct drm_i915_private *i915)
5467 {
5468         struct i915_power_domains *power_domains = &i915->power_domains;
5469
5470         drm_WARN_ON(&i915->drm, power_domains->wakeref);
5471         power_domains->wakeref =
5472                 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5473
5474         intel_power_domains_verify_state(i915);
5475 }
5476
/**
 * intel_power_domains_suspend - suspend power domain state
 * @i915: i915 device instance
 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
 *
 * This function prepares the hardware power domain state before entering
 * system suspend.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
 */
void intel_power_domains_suspend(struct drm_i915_private *i915,
                                 enum i915_drm_suspend_mode suspend_mode)
{
        struct i915_power_domains *power_domains = &i915->power_domains;
        /* Drop the INIT wakeref stashed by intel_power_domains_disable(). */
        intel_wakeref_t wakeref __maybe_unused =
                fetch_and_zero(&power_domains->wakeref);

        intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);

        /*
         * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
         * support don't manually deinit the power domains. This also means the
         * CSR/DMC firmware will stay active, it will power down any HW
         * resources as required and also enable deeper system power states
         * that would be blocked if the firmware was inactive.
         */
        if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
            suspend_mode == I915_DRM_SUSPEND_IDLE &&
            i915->csr.dmc_payload) {
                intel_display_power_flush_work(i915);
                intel_power_domains_verify_state(i915);
                return;
        }

        /*
         * Even if power well support was disabled we still want to disable
         * power wells if power domains must be deinitialized for suspend.
         */
        if (!i915_modparams.disable_power_well)
                intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);

        intel_display_power_flush_work(i915);
        intel_power_domains_verify_state(i915);

        /* Platform-specific display core uninit, mirroring init_hw(). */
        if (INTEL_GEN(i915) >= 11)
                icl_display_core_uninit(i915);
        else if (IS_CANNONLAKE(i915))
                cnl_display_core_uninit(i915);
        else if (IS_GEN9_BC(i915))
                skl_display_core_uninit(i915);
        else if (IS_GEN9_LP(i915))
                bxt_display_core_uninit(i915);

        /* Tells intel_power_domains_resume() to do a full HW re-init. */
        power_domains->display_core_suspended = true;
}
5533
5534 /**
5535  * intel_power_domains_resume - resume power domain state
5536  * @i915: i915 device instance
5537  *
5538  * This function resume the hardware power domain state during system resume.
5539  *
5540  * It will return with power domain support disabled (to be enabled later by
5541  * intel_power_domains_enable()) and must be paired with
5542  * intel_power_domains_suspend().
5543  */
5544 void intel_power_domains_resume(struct drm_i915_private *i915)
5545 {
5546         struct i915_power_domains *power_domains = &i915->power_domains;
5547
5548         if (power_domains->display_core_suspended) {
5549                 intel_power_domains_init_hw(i915, true);
5550                 power_domains->display_core_suspended = false;
5551         } else {
5552                 drm_WARN_ON(&i915->drm, power_domains->wakeref);
5553                 power_domains->wakeref =
5554                         intel_display_power_get(i915, POWER_DOMAIN_INIT);
5555         }
5556
5557         intel_power_domains_verify_state(i915);
5558 }
5559
5560 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
5561
5562 static void intel_power_domains_dump_info(struct drm_i915_private *i915)
5563 {
5564         struct i915_power_domains *power_domains = &i915->power_domains;
5565         struct i915_power_well *power_well;
5566
5567         for_each_power_well(i915, power_well) {
5568                 enum intel_display_power_domain domain;
5569
5570                 drm_dbg(&i915->drm, "%-25s %d\n",
5571                         power_well->desc->name, power_well->count);
5572
5573                 for_each_power_domain(domain, power_well->desc->domains)
5574                         drm_dbg(&i915->drm, "  %-23s %d\n",
5575                                 intel_display_power_domain_str(domain),
5576                                 power_domains->domain_use_count[domain]);
5577         }
5578 }
5579
5580 /**
5581  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
5582  * @i915: i915 device instance
5583  *
5584  * Verify if the reference count of each power well matches its HW enabled
5585  * state and the total refcount of the domains it belongs to. This must be
5586  * called after modeset HW state sanitization, which is responsible for
5587  * acquiring reference counts for any power wells in use and disabling the
5588  * ones left on by BIOS but not required by any active output.
5589  */
5590 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
5591 {
5592         struct i915_power_domains *power_domains = &i915->power_domains;
5593         struct i915_power_well *power_well;
5594         bool dump_domain_info;
5595
5596         mutex_lock(&power_domains->lock);
5597
5598         verify_async_put_domains_state(power_domains);
5599
5600         dump_domain_info = false;
5601         for_each_power_well(i915, power_well) {
5602                 enum intel_display_power_domain domain;
5603                 int domains_count;
5604                 bool enabled;
5605
5606                 enabled = power_well->desc->ops->is_enabled(i915, power_well);
5607                 if ((power_well->count || power_well->desc->always_on) !=
5608                     enabled)
5609                         drm_err(&i915->drm,
5610                                 "power well %s state mismatch (refcount %d/enabled %d)",
5611                                 power_well->desc->name,
5612                                 power_well->count, enabled);
5613
5614                 domains_count = 0;
5615                 for_each_power_domain(domain, power_well->desc->domains)
5616                         domains_count += power_domains->domain_use_count[domain];
5617
5618                 if (power_well->count != domains_count) {
5619                         drm_err(&i915->drm,
5620                                 "power well %s refcount/domain refcount mismatch "
5621                                 "(refcount %d/domains refcount %d)\n",
5622                                 power_well->desc->name, power_well->count,
5623                                 domains_count);
5624                         dump_domain_info = true;
5625                 }
5626         }
5627
5628         if (dump_domain_info) {
5629                 static bool dumped;
5630
5631                 if (!dumped) {
5632                         intel_power_domains_dump_info(i915);
5633                         dumped = true;
5634                 }
5635         }
5636
5637         mutex_unlock(&power_domains->lock);
5638 }
5639
5640 #else
5641
/* State verification is a debug-only aid; no-op without CONFIG_DRM_I915_DEBUG_RUNTIME_PM. */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}
5645
5646 #endif
5647
/* Late suspend: enter DC9 (gen9 LP and gen11+) or PC8 (HSW/BDW). */
void intel_display_power_suspend_late(struct drm_i915_private *i915)
{
        if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 11)
                bxt_enable_dc9(i915);
        else if (IS_BROADWELL(i915) || IS_HASWELL(i915))
                hsw_enable_pc8(i915);
}
5655
/* Early resume: leave DC9 (gen9 LP and gen11+) or PC8 (HSW/BDW). */
void intel_display_power_resume_early(struct drm_i915_private *i915)
{
        if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 11) {
                /* Sanitize the SW DC state before disabling DC9. */
                gen9_sanitize_dc_state(i915);
                bxt_disable_dc9(i915);
                return;
        }

        if (IS_BROADWELL(i915) || IS_HASWELL(i915))
                hsw_disable_pc8(i915);
}
5665
/*
 * Runtime/system suspend of the display power state: uninit the display
 * core where applicable, then enter DC9 (gen9 LP, gen11+) or PC8 (HSW/BDW).
 */
void intel_display_power_suspend(struct drm_i915_private *i915)
{
        /* HSW/BDW are neither gen11+ nor gen9 LP, so check them first. */
        if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
                hsw_enable_pc8(i915);
                return;
        }

        if (INTEL_GEN(i915) >= 11)
                icl_display_core_uninit(i915);
        else if (IS_GEN9_LP(i915))
                bxt_display_core_uninit(i915);
        else
                return;

        bxt_enable_dc9(i915);
}
5678
/*
 * Runtime/system resume of the display power state: leave DC9/PC8, re-init
 * the display core and re-enter the deepest DC state allowed by the loaded
 * CSR/DMC firmware.
 */
void intel_display_power_resume(struct drm_i915_private *i915)
{
        if (INTEL_GEN(i915) >= 11) {
                bxt_disable_dc9(i915);
                icl_display_core_init(i915, true);
                /* Re-enable DC states only if DMC firmware is loaded. */
                if (i915->csr.dmc_payload) {
                        if (i915->csr.allowed_dc_mask &
                            DC_STATE_EN_UPTO_DC6)
                                skl_enable_dc6(i915);
                        else if (i915->csr.allowed_dc_mask &
                                 DC_STATE_EN_UPTO_DC5)
                                gen9_enable_dc5(i915);
                }
        } else if (IS_GEN9_LP(i915)) {
                bxt_disable_dc9(i915);
                bxt_display_core_init(i915, true);
                /* Gen9 LP only goes up to DC5. */
                if (i915->csr.dmc_payload &&
                    (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
                        gen9_enable_dc5(i915);
        } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
                hsw_disable_pc8(i915);
        }
}