1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5
6 #include "display/intel_crt.h"
7 #include "display/intel_dp.h"
8
9 #include "i915_drv.h"
10 #include "i915_irq.h"
11 #include "intel_cdclk.h"
12 #include "intel_combo_phy.h"
13 #include "intel_csr.h"
14 #include "intel_display_power.h"
15 #include "intel_display_types.h"
16 #include "intel_dpio_phy.h"
17 #include "intel_hotplug.h"
18 #include "intel_pm.h"
19 #include "intel_sideband.h"
20 #include "intel_tc.h"
21 #include "intel_vga.h"
22
23 static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops;
24
25 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
26                                          enum i915_power_well_id power_well_id);
27
28 const char *
29 intel_display_power_domain_str(enum intel_display_power_domain domain)
30 {
31         switch (domain) {
32         case POWER_DOMAIN_DISPLAY_CORE:
33                 return "DISPLAY_CORE";
34         case POWER_DOMAIN_PIPE_A:
35                 return "PIPE_A";
36         case POWER_DOMAIN_PIPE_B:
37                 return "PIPE_B";
38         case POWER_DOMAIN_PIPE_C:
39                 return "PIPE_C";
40         case POWER_DOMAIN_PIPE_D:
41                 return "PIPE_D";
42         case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
43                 return "PIPE_A_PANEL_FITTER";
44         case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
45                 return "PIPE_B_PANEL_FITTER";
46         case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
47                 return "PIPE_C_PANEL_FITTER";
48         case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
49                 return "PIPE_D_PANEL_FITTER";
50         case POWER_DOMAIN_TRANSCODER_A:
51                 return "TRANSCODER_A";
52         case POWER_DOMAIN_TRANSCODER_B:
53                 return "TRANSCODER_B";
54         case POWER_DOMAIN_TRANSCODER_C:
55                 return "TRANSCODER_C";
56         case POWER_DOMAIN_TRANSCODER_D:
57                 return "TRANSCODER_D";
58         case POWER_DOMAIN_TRANSCODER_EDP:
59                 return "TRANSCODER_EDP";
60         case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
61                 return "TRANSCODER_VDSC_PW2";
62         case POWER_DOMAIN_TRANSCODER_DSI_A:
63                 return "TRANSCODER_DSI_A";
64         case POWER_DOMAIN_TRANSCODER_DSI_C:
65                 return "TRANSCODER_DSI_C";
66         case POWER_DOMAIN_PORT_DDI_A_LANES:
67                 return "PORT_DDI_A_LANES";
68         case POWER_DOMAIN_PORT_DDI_B_LANES:
69                 return "PORT_DDI_B_LANES";
70         case POWER_DOMAIN_PORT_DDI_C_LANES:
71                 return "PORT_DDI_C_LANES";
72         case POWER_DOMAIN_PORT_DDI_D_LANES:
73                 return "PORT_DDI_D_LANES";
74         case POWER_DOMAIN_PORT_DDI_E_LANES:
75                 return "PORT_DDI_E_LANES";
76         case POWER_DOMAIN_PORT_DDI_F_LANES:
77                 return "PORT_DDI_F_LANES";
78         case POWER_DOMAIN_PORT_DDI_G_LANES:
79                 return "PORT_DDI_G_LANES";
80         case POWER_DOMAIN_PORT_DDI_H_LANES:
81                 return "PORT_DDI_H_LANES";
82         case POWER_DOMAIN_PORT_DDI_I_LANES:
83                 return "PORT_DDI_I_LANES";
84         case POWER_DOMAIN_PORT_DDI_A_IO:
85                 return "PORT_DDI_A_IO";
86         case POWER_DOMAIN_PORT_DDI_B_IO:
87                 return "PORT_DDI_B_IO";
88         case POWER_DOMAIN_PORT_DDI_C_IO:
89                 return "PORT_DDI_C_IO";
90         case POWER_DOMAIN_PORT_DDI_D_IO:
91                 return "PORT_DDI_D_IO";
92         case POWER_DOMAIN_PORT_DDI_E_IO:
93                 return "PORT_DDI_E_IO";
94         case POWER_DOMAIN_PORT_DDI_F_IO:
95                 return "PORT_DDI_F_IO";
96         case POWER_DOMAIN_PORT_DDI_G_IO:
97                 return "PORT_DDI_G_IO";
98         case POWER_DOMAIN_PORT_DDI_H_IO:
99                 return "PORT_DDI_H_IO";
100         case POWER_DOMAIN_PORT_DDI_I_IO:
101                 return "PORT_DDI_I_IO";
102         case POWER_DOMAIN_PORT_DSI:
103                 return "PORT_DSI";
104         case POWER_DOMAIN_PORT_CRT:
105                 return "PORT_CRT";
106         case POWER_DOMAIN_PORT_OTHER:
107                 return "PORT_OTHER";
108         case POWER_DOMAIN_VGA:
109                 return "VGA";
110         case POWER_DOMAIN_AUDIO:
111                 return "AUDIO";
112         case POWER_DOMAIN_AUX_A:
113                 return "AUX_A";
114         case POWER_DOMAIN_AUX_B:
115                 return "AUX_B";
116         case POWER_DOMAIN_AUX_C:
117                 return "AUX_C";
118         case POWER_DOMAIN_AUX_D:
119                 return "AUX_D";
120         case POWER_DOMAIN_AUX_E:
121                 return "AUX_E";
122         case POWER_DOMAIN_AUX_F:
123                 return "AUX_F";
124         case POWER_DOMAIN_AUX_G:
125                 return "AUX_G";
126         case POWER_DOMAIN_AUX_H:
127                 return "AUX_H";
128         case POWER_DOMAIN_AUX_I:
129                 return "AUX_I";
130         case POWER_DOMAIN_AUX_IO_A:
131                 return "AUX_IO_A";
132         case POWER_DOMAIN_AUX_C_TBT:
133                 return "AUX_C_TBT";
134         case POWER_DOMAIN_AUX_D_TBT:
135                 return "AUX_D_TBT";
136         case POWER_DOMAIN_AUX_E_TBT:
137                 return "AUX_E_TBT";
138         case POWER_DOMAIN_AUX_F_TBT:
139                 return "AUX_F_TBT";
140         case POWER_DOMAIN_AUX_G_TBT:
141                 return "AUX_G_TBT";
142         case POWER_DOMAIN_AUX_H_TBT:
143                 return "AUX_H_TBT";
144         case POWER_DOMAIN_AUX_I_TBT:
145                 return "AUX_I_TBT";
146         case POWER_DOMAIN_GMBUS:
147                 return "GMBUS";
148         case POWER_DOMAIN_INIT:
149                 return "INIT";
150         case POWER_DOMAIN_MODESET:
151                 return "MODESET";
152         case POWER_DOMAIN_GT_IRQ:
153                 return "GT_IRQ";
154         case POWER_DOMAIN_DPLL_DC_OFF:
155                 return "DPLL_DC_OFF";
156         case POWER_DOMAIN_TC_COLD_OFF:
157                 return "TC_COLD_OFF";
158         default:
159                 MISSING_CASE(domain);
160                 return "?";
161         }
162 }
163
164 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
165                                     struct i915_power_well *power_well)
166 {
167         drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
168         power_well->desc->ops->enable(dev_priv, power_well);
169         power_well->hw_enabled = true;
170 }
171
172 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
173                                      struct i915_power_well *power_well)
174 {
175         drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
176         power_well->hw_enabled = false;
177         power_well->desc->ops->disable(dev_priv, power_well);
178 }
179
180 static void intel_power_well_get(struct drm_i915_private *dev_priv,
181                                  struct i915_power_well *power_well)
182 {
183         if (!power_well->count++)
184                 intel_power_well_enable(dev_priv, power_well);
185 }
186
187 static void intel_power_well_put(struct drm_i915_private *dev_priv,
188                                  struct i915_power_well *power_well)
189 {
190         drm_WARN(&dev_priv->drm, !power_well->count,
191                  "Use count on power well %s is already zero",
192                  power_well->desc->name);
193
194         if (!--power_well->count)
195                 intel_power_well_disable(dev_priv, power_well);
196 }
197
198 /**
199  * __intel_display_power_is_enabled - unlocked check for a power domain
200  * @dev_priv: i915 device instance
201  * @domain: power domain to check
202  *
203  * This is the unlocked version of intel_display_power_is_enabled() and should
204  * only be used from error capture and recovery code where deadlocks are
205  * possible.
206  *
207  * Returns:
208  * True when the power domain is enabled, false otherwise.
209  */
210 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
211                                       enum intel_display_power_domain domain)
212 {
213         struct i915_power_well *power_well;
214         bool is_enabled;
215
216         if (dev_priv->runtime_pm.suspended)
217                 return false;
218
219         is_enabled = true;
220
221         for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
222                 if (power_well->desc->always_on)
223                         continue;
224
225                 if (!power_well->hw_enabled) {
226                         is_enabled = false;
227                         break;
228                 }
229         }
230
231         return is_enabled;
232 }
233
234 /**
235  * intel_display_power_is_enabled - check for a power domain
236  * @dev_priv: i915 device instance
237  * @domain: power domain to check
238  *
239  * This function can be used to check the hw power domain state. It is mostly
240  * used in hardware state readout functions. Everywhere else code should rely
241  * upon explicit power domain reference counting to ensure that the hardware
242  * block is powered up before accessing it.
243  *
244  * Callers must hold the relevant modesetting locks to ensure that concurrent
245  * threads can't disable the power well while the caller tries to read a few
246  * registers.
247  *
248  * Returns:
249  * True when the power domain is enabled, false otherwise.
250  */
251 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
252                                     enum intel_display_power_domain domain)
253 {
254         struct i915_power_domains *power_domains;
255         bool ret;
256
257         power_domains = &dev_priv->power_domains;
258
259         mutex_lock(&power_domains->lock);
260         ret = __intel_display_power_is_enabled(dev_priv, domain);
261         mutex_unlock(&power_domains->lock);
262
263         return ret;
264 }
265
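/*
 * A minimal usage sketch of the reference-counted alternative recommended in
 * the comment above, assuming the intel_display_power_get()/put() helpers
 * declared in intel_display_power.h (exact signatures may differ between
 * kernel versions):
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	... access PIPE_A registers safely here ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
 */
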
266 /*
267  * Starting with Haswell, we have a "Power Down Well" that can be turned off
268  * when not needed anymore. We have 4 registers that can request the power well
269  * to be enabled, and it will only be disabled if none of the registers is
270  * requesting it to be enabled.
271  */
272 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
273                                        u8 irq_pipe_mask, bool has_vga)
274 {
275         if (has_vga)
276                 intel_vga_reset_io_mem(dev_priv);
277
278         if (irq_pipe_mask)
279                 gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
280 }
281
282 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
283                                        u8 irq_pipe_mask)
284 {
285         if (irq_pipe_mask)
286                 gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
287 }
288
289 #define ICL_AUX_PW_TO_CH(pw_idx)        \
290         ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
291
292 #define ICL_TBT_AUX_PW_TO_CH(pw_idx)    \
293         ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
294
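/*
 * Worked example for the two macros above: ICL_AUX_PW_TO_CH() maps
 * ICL_PW_CTL_IDX_AUX_B to AUX_CH_B, while ICL_TBT_AUX_PW_TO_CH() maps
 * ICL_PW_CTL_IDX_AUX_TBT2 to AUX_CH_D, since TBT AUX channels start at
 * AUX_CH_C. Both mappings rely on the AUX power well indices being
 * consecutive.
 */
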
295 static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
296                                      struct i915_power_well *power_well)
297 {
298         int pw_idx = power_well->desc->hsw.idx;
299
300         return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
301                                                  ICL_AUX_PW_TO_CH(pw_idx);
302 }
303
304 static struct intel_digital_port *
305 aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
306                        enum aux_ch aux_ch)
307 {
308         struct intel_digital_port *dig_port = NULL;
309         struct intel_encoder *encoder;
310
311         for_each_intel_encoder(&dev_priv->drm, encoder) {
312                 /* We'll check the MST primary port */
313                 if (encoder->type == INTEL_OUTPUT_DP_MST)
314                         continue;
315
316                 dig_port = enc_to_dig_port(encoder);
317                 if (!dig_port)
318                         continue;
319
320                 if (dig_port->aux_ch != aux_ch) {
321                         dig_port = NULL;
322                         continue;
323                 }
324
325                 break;
326         }
327
328         return dig_port;
329 }
330
331 static bool tc_phy_aux_timeout_expected(struct drm_i915_private *dev_priv,
332                                         struct i915_power_well *power_well)
333 {
334         /* An AUX timeout is expected if the TBT DP tunnel is down. */
335         if (power_well->desc->hsw.is_tc_tbt)
336                 return true;
337
338         /*
339          * An AUX timeout is expected because we enable the TC legacy port's
340          * AUX power well to hold the port out of TC cold
341          */
342         if (INTEL_GEN(dev_priv) == 11 &&
343             power_well->desc->ops == &icl_tc_phy_aux_power_well_ops) {
344                 enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
345                 struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
346
347                 return dig_port->tc_legacy_port;
348         }
349
350         return false;
351 }
352
353 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
354                                            struct i915_power_well *power_well)
355 {
356         const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
357         int pw_idx = power_well->desc->hsw.idx;
358
359         /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
360         if (intel_de_wait_for_set(dev_priv, regs->driver,
361                                   HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
362                 drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
363                             power_well->desc->name);
364
365                 drm_WARN_ON(&dev_priv->drm,
366                             !tc_phy_aux_timeout_expected(dev_priv, power_well));
367
368         }
369 }
370
371 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
372                                      const struct i915_power_well_regs *regs,
373                                      int pw_idx)
374 {
375         u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
376         u32 ret;
377
378         ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
379         ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
380         if (regs->kvmr.reg)
381                 ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
382         ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
383
384         return ret;
385 }
386
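/*
 * The value returned above packs one requester per bit: BIOS (bit 0),
 * driver (bit 1), KVMR (bit 2) and DEBUG (bit 3), matching the decoding in
 * the "forced on" debug message below. A return value of 0x5, for example,
 * means the BIOS and KVMR request bits are both set.
 */
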
387 static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
388                                             struct i915_power_well *power_well)
389 {
390         const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
391         int pw_idx = power_well->desc->hsw.idx;
392         bool disabled;
393         u32 reqs;
394
395         /*
396          * Bspec doesn't require waiting for PWs to get disabled, but still do
397          * this for paranoia. The known cases where a PW will be forced on:
398          * - a KVMR request on any power well via the KVMR request register
399          * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
400          *   DEBUG request registers
401          * Skip the wait in case any of the request bits are set and print a
402          * diagnostic message.
403          */
404         wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
405                                HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
406                  (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
407         if (disabled)
408                 return;
409
410         drm_dbg_kms(&dev_priv->drm,
411                     "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
412                     power_well->desc->name,
413                     !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
414 }
415
416 static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
417                                            enum skl_power_gate pg)
418 {
419         /* Timeout 5us for PG#0, for other PGs 1us */
420         drm_WARN_ON(&dev_priv->drm,
421                     intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
422                                           SKL_FUSE_PG_DIST_STATUS(pg), 1));
423 }
424
425 static void hsw_power_well_enable_prepare(struct drm_i915_private *dev_priv,
426                                           struct i915_power_well *power_well)
427 {
428         const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
429         int pw_idx = power_well->desc->hsw.idx;
430         u32 val;
431
432         if (power_well->desc->hsw.has_fuses) {
433                 enum skl_power_gate pg;
434
435                 pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
436                                                  SKL_PW_CTL_IDX_TO_PG(pw_idx);
437                 /*
438                  * For PW1 we have to wait both for the PW0/PG0 fuse state
439                  * before enabling the power well and PW1/PG1's own fuse
440                  * state after the enabling. For all other power wells with
441                  * fuses we only have to wait for that PW/PG's fuse state
442                  * after the enabling.
443                  */
444                 if (pg == SKL_PG1)
445                         gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
446         }
447
448         val = intel_de_read(dev_priv, regs->driver);
449         intel_de_write(dev_priv, regs->driver,
450                        val | HSW_PWR_WELL_CTL_REQ(pw_idx));
451 }
452
453 static void hsw_power_well_enable_complete(struct drm_i915_private *dev_priv,
454                                            struct i915_power_well *power_well)
455 {
456         int pw_idx = power_well->desc->hsw.idx;
457
458         hsw_wait_for_power_well_enable(dev_priv, power_well);
459
460         /* Display WA #1178: cnl */
461         if (IS_CANNONLAKE(dev_priv) &&
462             pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
463             pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
464                 u32 val;
465
466                 val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
467                 val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
468                 intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
469         }
470
471         if (power_well->desc->hsw.has_fuses) {
472                 enum skl_power_gate pg;
473
474                 pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
475                                                  SKL_PW_CTL_IDX_TO_PG(pw_idx);
476                 gen9_wait_for_power_well_fuses(dev_priv, pg);
477         }
478
479         hsw_power_well_post_enable(dev_priv,
480                                    power_well->desc->hsw.irq_pipe_mask,
481                                    power_well->desc->hsw.has_vga);
482 }
483
484 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
485                                   struct i915_power_well *power_well)
486 {
487         hsw_power_well_enable_prepare(dev_priv, power_well);
488         hsw_power_well_enable_complete(dev_priv, power_well);
489 }
490
491 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
492                                    struct i915_power_well *power_well)
493 {
494         const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
495         int pw_idx = power_well->desc->hsw.idx;
496         u32 val;
497
498         hsw_power_well_pre_disable(dev_priv,
499                                    power_well->desc->hsw.irq_pipe_mask);
500
501         val = intel_de_read(dev_priv, regs->driver);
502         intel_de_write(dev_priv, regs->driver,
503                        val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
504         hsw_wait_for_power_well_disable(dev_priv, power_well);
505 }
506
507 #define ICL_AUX_PW_TO_PHY(pw_idx)       ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
508
509 static void
510 icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
511                                     struct i915_power_well *power_well)
512 {
513         const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
514         int pw_idx = power_well->desc->hsw.idx;
515         enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
516         u32 val;
517
518         drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
519
520         val = intel_de_read(dev_priv, regs->driver);
521         intel_de_write(dev_priv, regs->driver,
522                        val | HSW_PWR_WELL_CTL_REQ(pw_idx));
523
524         if (INTEL_GEN(dev_priv) < 12) {
525                 val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
526                 intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
527                                val | ICL_LANE_ENABLE_AUX);
528         }
529
530         hsw_wait_for_power_well_enable(dev_priv, power_well);
531
532         /* Display WA #1178: icl */
533         if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
534             !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
535                 val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
536                 val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
537                 intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
538         }
539 }
540
541 static void
542 icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
543                                      struct i915_power_well *power_well)
544 {
545         const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
546         int pw_idx = power_well->desc->hsw.idx;
547         enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
548         u32 val;
549
550         drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
551
552         val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
553         intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
554                        val & ~ICL_LANE_ENABLE_AUX);
555
556         val = intel_de_read(dev_priv, regs->driver);
557         intel_de_write(dev_priv, regs->driver,
558                        val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
559
560         hsw_wait_for_power_well_disable(dev_priv, power_well);
561 }
562
563 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
564
565 static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
566
567 static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
568                                       struct i915_power_well *power_well)
569 {
570         int refs = hweight64(power_well->desc->domains &
571                              async_put_domains_mask(&dev_priv->power_domains));
572
573         drm_WARN_ON(&dev_priv->drm, refs > power_well->count);
574
575         return refs;
576 }
577
578 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
579                                         struct i915_power_well *power_well,
580                                         struct intel_digital_port *dig_port)
581 {
582         /* Bypass the check if all references are released asynchronously */
583         if (power_well_async_ref_count(dev_priv, power_well) ==
584             power_well->count)
585                 return;
586
587         if (drm_WARN_ON(&dev_priv->drm, !dig_port))
588                 return;
589
590         if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port)
591                 return;
592
593         drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
594 }
595
596 #else
597
598 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
599                                         struct i915_power_well *power_well,
600                                         struct intel_digital_port *dig_port)
601 {
602 }
603
604 #endif
605
606 #define TGL_AUX_PW_TO_TC_PORT(pw_idx)   ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
607
608 static void icl_tc_cold_exit(struct drm_i915_private *i915)
609 {
610         int ret, tries = 0;
611
612         while (1) {
613                 ret = sandybridge_pcode_write_timeout(i915,
614                                                       ICL_PCODE_EXIT_TCCOLD,
615                                                       0, 250, 1);
616                 if (ret != -EAGAIN || ++tries == 3)
617                         break;
618                 msleep(1);
619         }
620
621         /* Spec states that TC cold exit can take up to 1ms to complete */
622         if (!ret)
623                 msleep(1);
624
625         /* TODO: turn failure into an error as soon as i915 CI updates the ICL IFWI */
626         drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
627                     "succeeded");
628 }
629
630 static void
631 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
632                                  struct i915_power_well *power_well)
633 {
634         enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
635         struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
636         u32 val;
637
638         icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
639
640         val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
641         val &= ~DP_AUX_CH_CTL_TBT_IO;
642         if (power_well->desc->hsw.is_tc_tbt)
643                 val |= DP_AUX_CH_CTL_TBT_IO;
644         intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
645
646         hsw_power_well_enable_prepare(dev_priv, power_well);
647
648         if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port)
649                 icl_tc_cold_exit(dev_priv);
650
651         hsw_power_well_enable_complete(dev_priv, power_well);
652
653         if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) {
654                 enum tc_port tc_port;
655
656                 tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
657                 intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
658                                HIP_INDEX_VAL(tc_port, 0x2));
659
660                 if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
661                                           DKL_CMN_UC_DW27_UC_HEALTH, 1))
662                         drm_warn(&dev_priv->drm,
663                                  "Timeout waiting TC uC health\n");
664         }
665 }
666
667 static void
668 icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
669                                   struct i915_power_well *power_well)
670 {
671         enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
672         struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
673
674         icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
675
676         hsw_power_well_disable(dev_priv, power_well);
677 }
678
679 /*
680  * We should only use the power well if we explicitly asked the hardware to
681  * enable it, so check if it's enabled and also check if we've requested it to
682  * be enabled.
683  */
684 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
685                                    struct i915_power_well *power_well)
686 {
687         const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
688         enum i915_power_well_id id = power_well->desc->id;
689         int pw_idx = power_well->desc->hsw.idx;
690         u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
691                    HSW_PWR_WELL_CTL_STATE(pw_idx);
692         u32 val;
693
694         val = intel_de_read(dev_priv, regs->driver);
695
696         /*
697          * On GEN9 big core due to a DMC bug the driver's request bits for PW1
698          * and the MISC_IO PW will not be restored, so check instead for the
699          * BIOS's own request bits, which are forced-on for these power wells
700          * when exiting DC5/6.
701          */
702         if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
703             (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
704                 val |= intel_de_read(dev_priv, regs->bios);
705
706         return (val & mask) == mask;
707 }
708
709 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
710 {
711         drm_WARN_ONCE(&dev_priv->drm,
712                       (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
713                       "DC9 already programmed to be enabled.\n");
714         drm_WARN_ONCE(&dev_priv->drm,
715                       intel_de_read(dev_priv, DC_STATE_EN) &
716                       DC_STATE_EN_UPTO_DC5,
717                       "DC5 still not disabled to enable DC9.\n");
718         drm_WARN_ONCE(&dev_priv->drm,
719                       intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
720                       HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
721                       "Power well 2 on.\n");
722         drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
723                       "Interrupts not disabled yet.\n");
724
725          /*
726           * TODO: check for the following to verify the conditions to enter DC9
727           * state are satisfied:
728           * 1] Check relevant display engine registers to verify if mode set
729           * disable sequence was followed.
730           * 2] Check if display uninitialize sequence is initialized.
731           */
732 }
733
734 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
735 {
736         drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
737                       "Interrupts not disabled yet.\n");
738         drm_WARN_ONCE(&dev_priv->drm,
739                       intel_de_read(dev_priv, DC_STATE_EN) &
740                       DC_STATE_EN_UPTO_DC5,
741                       "DC5 still not disabled.\n");
742
743          /*
744           * TODO: check for the following to verify DC9 state was indeed
745           * entered before programming to disable it:
746           * 1] Check relevant display engine registers to verify if mode
747           *  set disable sequence was followed.
748           * 2] Check if display uninitialize sequence is initialized.
749           */
750 }
751
752 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
753                                 u32 state)
754 {
755         int rewrites = 0;
756         int rereads = 0;
757         u32 v;
758
759         intel_de_write(dev_priv, DC_STATE_EN, state);
760
761         /* It has been observed that disabling the DC6 state sometimes
762          * doesn't stick and the DMC keeps returning the old value. Re-read
763          * enough times to make sure the write really sticks, and force a
764          * rewrite until we are confident the state is exactly what we want.
765          */
766         do  {
767                 v = intel_de_read(dev_priv, DC_STATE_EN);
768
769                 if (v != state) {
770                         intel_de_write(dev_priv, DC_STATE_EN, state);
771                         rewrites++;
772                         rereads = 0;
773                 } else if (rereads++ > 5) {
774                         break;
775                 }
776
777         } while (rewrites < 100);
778
779         if (v != state)
780                 drm_err(&dev_priv->drm,
781                         "Writing dc state to 0x%x failed, now 0x%x\n",
782                         state, v);
783
784         /* Most of the time one retry is enough, so avoid spamming the log */
785         if (rewrites > 1)
786                 drm_dbg_kms(&dev_priv->drm,
787                             "Rewrote dc state to 0x%x %d times\n",
788                             state, rewrites);
789 }
790
791 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
792 {
793         u32 mask;
794
795         mask = DC_STATE_EN_UPTO_DC5;
796
797         if (INTEL_GEN(dev_priv) >= 12)
798                 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
799                                           | DC_STATE_EN_DC9;
800         else if (IS_GEN(dev_priv, 11))
801                 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
802         else if (IS_GEN9_LP(dev_priv))
803                 mask |= DC_STATE_EN_DC9;
804         else
805                 mask |= DC_STATE_EN_UPTO_DC6;
806
807         return mask;
808 }
809
810 static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
811 {
812         u32 val;
813
814         val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
815
816         drm_dbg_kms(&dev_priv->drm,
817                     "Resetting DC state tracking from %02x to %02x\n",
818                     dev_priv->csr.dc_state, val);
819         dev_priv->csr.dc_state = val;
820 }
821
822 /**
823  * gen9_set_dc_state - set target display C power state
824  * @dev_priv: i915 device instance
825  * @state: target DC power state
826  * - DC_STATE_DISABLE
827  * - DC_STATE_EN_UPTO_DC5
828  * - DC_STATE_EN_UPTO_DC6
829  * - DC_STATE_EN_DC9
830  *
831  * Signal to DMC firmware/HW the target DC power state passed in @state.
832  * DMC/HW can turn off individual display clocks and power rails when entering
833  * a deeper DC power state (higher in number) and turns them back on when
834  * exiting that state to a shallower one (lower in number). The HW will decide
835  * when to actually enter a given state on an on-demand basis, for instance
836  * depending on the active state of display pipes. The state of display
837  * registers backed by affected power rails is saved/restored as needed.
838  *
839  * Based on the above, entering a deeper DC power state is asynchronous wrt.
840  * enabling it. Disabling a deeper power state is synchronous: for instance
841  * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
842  * back on and register state is restored. This is guaranteed by the MMIO write
843  * to DC_STATE_EN blocking until the state is restored.
844  */
845 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
846 {
847         u32 val;
848         u32 mask;
849
850         if (drm_WARN_ON_ONCE(&dev_priv->drm,
851                              state & ~dev_priv->csr.allowed_dc_mask))
852                 state &= dev_priv->csr.allowed_dc_mask;
853
854         val = intel_de_read(dev_priv, DC_STATE_EN);
855         mask = gen9_dc_mask(dev_priv);
856         drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
857                     val & mask, state);
858
859         /* Check if DMC is ignoring our DC state requests */
860         if ((val & mask) != dev_priv->csr.dc_state)
861                 drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
862                         dev_priv->csr.dc_state, val & mask);
863
864         val &= ~mask;
865         val |= state;
866
867         gen9_write_dc_state(dev_priv, val);
868
869         dev_priv->csr.dc_state = val & mask;
870 }
871
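/*
 * A minimal sketch of how the helpers further down drive this, requesting
 * the deepest allowed state and later disabling DC states again before
 * touching the affected registers:
 *
 *	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
 *	...
 *	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 */
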
872 static u32
873 sanitize_target_dc_state(struct drm_i915_private *dev_priv,
874                          u32 target_dc_state)
875 {
876         u32 states[] = {
877                 DC_STATE_EN_UPTO_DC6,
878                 DC_STATE_EN_UPTO_DC5,
879                 DC_STATE_EN_DC3CO,
880                 DC_STATE_DISABLE,
881         };
882         int i;
883
884         for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
885                 if (target_dc_state != states[i])
886                         continue;
887
888                 if (dev_priv->csr.allowed_dc_mask & target_dc_state)
889                         break;
890
891                 target_dc_state = states[i + 1];
892         }
893
894         return target_dc_state;
895 }
896
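/*
 * Illustrative example of the fallback walk above, assuming a platform whose
 * allowed_dc_mask permits DC5 but not DC6:
 *
 *	state = sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
 *	// state is now DC_STATE_EN_UPTO_DC5; if no DC state were allowed
 *	// at all, the walk would end at DC_STATE_DISABLE.
 */
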
897 static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
898 {
899         drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
900         gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
901 }
902
903 static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
904 {
905         u32 val;
906
907         drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
908         val = intel_de_read(dev_priv, DC_STATE_EN);
909         val &= ~DC_STATE_DC3CO_STATUS;
910         intel_de_write(dev_priv, DC_STATE_EN, val);
911         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
912         /*
913          * Delay of 200us for DC3CO exit time, per Bspec 49196
914          */
915         usleep_range(200, 210);
916 }
917
918 static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
919 {
920         assert_can_enable_dc9(dev_priv);
921
922         drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
923         /*
924          * Power sequencer reset is not needed on
925          * platforms with South Display Engine on PCH,
926          * because PPS registers are always on.
927          */
928         if (!HAS_PCH_SPLIT(dev_priv))
929                 intel_power_sequencer_reset(dev_priv);
930         gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
931 }
932
933 static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
934 {
935         assert_can_disable_dc9(dev_priv);
936
937         drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
938
939         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
940
941         intel_pps_unlock_regs_wa(dev_priv);
942 }
943
944 static void assert_csr_loaded(struct drm_i915_private *dev_priv)
945 {
946         drm_WARN_ONCE(&dev_priv->drm,
947                       !intel_de_read(dev_priv, CSR_PROGRAM(0)),
948                       "CSR program storage start is NULL\n");
949         drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_SSP_BASE),
950                       "CSR SSP Base Not fine\n");
951         drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_HTP_SKL),
952                       "CSR HTP Not fine\n");
953 }
954
955 static struct i915_power_well *
956 lookup_power_well(struct drm_i915_private *dev_priv,
957                   enum i915_power_well_id power_well_id)
958 {
959         struct i915_power_well *power_well;
960
961         for_each_power_well(dev_priv, power_well)
962                 if (power_well->desc->id == power_well_id)
963                         return power_well;
964
965         /*
966          * It's not feasible to add error checking code to the callers since
967          * this condition really shouldn't happen and it doesn't even make sense
968          * to abort things like display initialization sequences. Just return
969          * the first power well and hope the WARN gets reported so we can fix
970          * our driver.
971          */
972         drm_WARN(&dev_priv->drm, 1,
973                  "Power well %d not defined for this platform\n",
974                  power_well_id);
975         return &dev_priv->power_domains.power_wells[0];
976 }
977
978 /**
979  * intel_display_power_set_target_dc_state - Set target dc state.
980  * @dev_priv: i915 device
981  * @state: state which needs to be set as target_dc_state.
982  *
983  * This function sets the "DC off" power well's target_dc_state;
984  * based upon this target_dc_state, the "DC off" power well will
985  * enable the desired DC state.
986  */
987 void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
988                                              u32 state)
989 {
990         struct i915_power_well *power_well;
991         bool dc_off_enabled;
992         struct i915_power_domains *power_domains = &dev_priv->power_domains;
993
994         mutex_lock(&power_domains->lock);
995         power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
996
997         if (drm_WARN_ON(&dev_priv->drm, !power_well))
998                 goto unlock;
999
1000         state = sanitize_target_dc_state(dev_priv, state);
1001
1002         if (state == dev_priv->csr.target_dc_state)
1003                 goto unlock;
1004
1005         dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
1006                                                            power_well);
1007         /*
1008          * If the DC off power well is disabled, we need to enable and then
1009          * disable it for the new target DC state to take effect.
1010          */
1011         if (!dc_off_enabled)
1012                 power_well->desc->ops->enable(dev_priv, power_well);
1013
1014         dev_priv->csr.target_dc_state = state;
1015
1016         if (!dc_off_enabled)
1017                 power_well->desc->ops->disable(dev_priv, power_well);
1018
1019 unlock:
1020         mutex_unlock(&power_domains->lock);
1021 }
1022
1023 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
1024 {
1025         enum i915_power_well_id high_pg;
1026
1027         /* Power wells at this level and above must be disabled for DC5 entry */
1028         if (INTEL_GEN(dev_priv) >= 12)
1029                 high_pg = ICL_DISP_PW_3;
1030         else
1031                 high_pg = SKL_DISP_PW_2;
1032
1033         drm_WARN_ONCE(&dev_priv->drm,
1034                       intel_display_power_well_is_enabled(dev_priv, high_pg),
1035                       "Power wells above platform's DC5 limit still enabled.\n");
1036
1037         drm_WARN_ONCE(&dev_priv->drm,
1038                       (intel_de_read(dev_priv, DC_STATE_EN) &
1039                        DC_STATE_EN_UPTO_DC5),
1040                       "DC5 already programmed to be enabled.\n");
1041         assert_rpm_wakelock_held(&dev_priv->runtime_pm);
1042
1043         assert_csr_loaded(dev_priv);
1044 }
1045
1046 static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
1047 {
1048         assert_can_enable_dc5(dev_priv);
1049
1050         drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
1051
1052         /* Wa Display #1183: skl,kbl,cfl */
1053         if (IS_GEN9_BC(dev_priv))
1054                 intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
1055                                intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
1056
1057         gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
1058 }
1059
1060 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
1061 {
1062         drm_WARN_ONCE(&dev_priv->drm,
1063                       intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
1064                       "Backlight is not disabled.\n");
1065         drm_WARN_ONCE(&dev_priv->drm,
1066                       (intel_de_read(dev_priv, DC_STATE_EN) &
1067                        DC_STATE_EN_UPTO_DC6),
1068                       "DC6 already programmed to be enabled.\n");
1069
1070         assert_csr_loaded(dev_priv);
1071 }
1072
1073 static void skl_enable_dc6(struct drm_i915_private *dev_priv)
1074 {
1075         assert_can_enable_dc6(dev_priv);
1076
1077         drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
1078
1079         /* Wa Display #1183: skl,kbl,cfl */
1080         if (IS_GEN9_BC(dev_priv))
1081                 intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
1082                                intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
1083
1084         gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1085 }
1086
1087 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
1088                                    struct i915_power_well *power_well)
1089 {
1090         const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
1091         int pw_idx = power_well->desc->hsw.idx;
1092         u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
1093         u32 bios_req = intel_de_read(dev_priv, regs->bios);
1094
1095         /* Take over the request bit if set by BIOS. */
1096         if (bios_req & mask) {
1097                 u32 drv_req = intel_de_read(dev_priv, regs->driver);
1098
1099                 if (!(drv_req & mask))
1100                         intel_de_write(dev_priv, regs->driver, drv_req | mask);
1101                 intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
1102         }
1103 }
1104
1105 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1106                                            struct i915_power_well *power_well)
1107 {
1108         bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
1109 }
1110
1111 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1112                                             struct i915_power_well *power_well)
1113 {
1114         bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
1115 }
1116
1117 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
1118                                             struct i915_power_well *power_well)
1119 {
1120         return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
1121 }
1122
1123 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
1124 {
1125         struct i915_power_well *power_well;
1126
1127         power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
1128         if (power_well->count > 0)
1129                 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1130
1131         power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1132         if (power_well->count > 0)
1133                 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1134
1135         if (IS_GEMINILAKE(dev_priv)) {
1136                 power_well = lookup_power_well(dev_priv,
1137                                                GLK_DISP_PW_DPIO_CMN_C);
1138                 if (power_well->count > 0)
1139                         bxt_ddi_phy_verify_state(dev_priv,
1140                                                  power_well->desc->bxt.phy);
1141         }
1142 }
1143
1144 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
1145                                            struct i915_power_well *power_well)
1146 {
1147         return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
1148                 (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
1149 }
1150
1151 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
1152 {
1153         u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
1154         u8 enabled_dbuf_slices = dev_priv->enabled_dbuf_slices_mask;
1155
1156         drm_WARN(&dev_priv->drm,
1157                  hw_enabled_dbuf_slices != enabled_dbuf_slices,
1158                  "Unexpected DBuf power state (0x%08x, expected 0x%08x)\n",
1159                  hw_enabled_dbuf_slices,
1160                  enabled_dbuf_slices);
1161 }
1162
1163 static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
1164 {
1165         struct intel_cdclk_config cdclk_config = {};
1166
1167         if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
1168                 tgl_disable_dc3co(dev_priv);
1169                 return;
1170         }
1171
1172         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1173
1174         dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
1175         /* Can't read out voltage_level so can't use intel_cdclk_changed() */
1176         drm_WARN_ON(&dev_priv->drm,
1177                     intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
1178                                               &cdclk_config));
1179
1180         gen9_assert_dbuf_enabled(dev_priv);
1181
1182         if (IS_GEN9_LP(dev_priv))
1183                 bxt_verify_ddi_phy_power_wells(dev_priv);
1184
1185         if (INTEL_GEN(dev_priv) >= 11)
1186                 /*
1187                  * DMC retains HW context only for port A, the other combo
1188                  * PHY's HW context for port B is lost after DC transitions,
1189                  * so we need to restore it manually.
1190                  */
1191                 intel_combo_phy_init(dev_priv);
1192 }
1193
1194 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
1195                                           struct i915_power_well *power_well)
1196 {
1197         gen9_disable_dc_states(dev_priv);
1198 }
1199
1200 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
1201                                            struct i915_power_well *power_well)
1202 {
1203         if (!dev_priv->csr.dmc_payload)
1204                 return;
1205
1206         switch (dev_priv->csr.target_dc_state) {
1207         case DC_STATE_EN_DC3CO:
1208                 tgl_enable_dc3co(dev_priv);
1209                 break;
1210         case DC_STATE_EN_UPTO_DC6:
1211                 skl_enable_dc6(dev_priv);
1212                 break;
1213         case DC_STATE_EN_UPTO_DC5:
1214                 gen9_enable_dc5(dev_priv);
1215                 break;
1216         }
1217 }
1218
1219 static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
1220                                          struct i915_power_well *power_well)
1221 {
1222 }
1223
1224 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
1225                                            struct i915_power_well *power_well)
1226 {
1227 }
1228
1229 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
1230                                              struct i915_power_well *power_well)
1231 {
1232         return true;
1233 }
1234
1235 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
1236                                          struct i915_power_well *power_well)
1237 {
1238         if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
1239                 i830_enable_pipe(dev_priv, PIPE_A);
1240         if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
1241                 i830_enable_pipe(dev_priv, PIPE_B);
1242 }
1243
1244 static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
1245                                           struct i915_power_well *power_well)
1246 {
1247         i830_disable_pipe(dev_priv, PIPE_B);
1248         i830_disable_pipe(dev_priv, PIPE_A);
1249 }
1250
1251 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1252                                           struct i915_power_well *power_well)
1253 {
1254         return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1255                 intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
1256 }
1257
1258 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
1259                                           struct i915_power_well *power_well)
1260 {
1261         if (power_well->count > 0)
1262                 i830_pipes_power_well_enable(dev_priv, power_well);
1263         else
1264                 i830_pipes_power_well_disable(dev_priv, power_well);
1265 }
1266
1267 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
1268                                struct i915_power_well *power_well, bool enable)
1269 {
1270         int pw_idx = power_well->desc->vlv.idx;
1271         u32 mask;
1272         u32 state;
1273         u32 ctrl;
1274
1275         mask = PUNIT_PWRGT_MASK(pw_idx);
1276         state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
1277                          PUNIT_PWRGT_PWR_GATE(pw_idx);
1278
1279         vlv_punit_get(dev_priv);
1280
1281 #define COND \
1282         ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
1283
1284         if (COND)
1285                 goto out;
1286
1287         ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
1288         ctrl &= ~mask;
1289         ctrl |= state;
1290         vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
1291
1292         if (wait_for(COND, 100))
1293                 drm_err(&dev_priv->drm,
1294                         "timeout setting power well state %08x (%08x)\n",
1295                         state,
1296                         vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
1297
1298 #undef COND
1299
1300 out:
1301         vlv_punit_put(dev_priv);
1302 }
1303
1304 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
1305                                   struct i915_power_well *power_well)
1306 {
1307         vlv_set_power_well(dev_priv, power_well, true);
1308 }
1309
1310 static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
1311                                    struct i915_power_well *power_well)
1312 {
1313         vlv_set_power_well(dev_priv, power_well, false);
1314 }
1315
1316 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
1317                                    struct i915_power_well *power_well)
1318 {
1319         int pw_idx = power_well->desc->vlv.idx;
1320         bool enabled = false;
1321         u32 mask;
1322         u32 state;
1323         u32 ctrl;
1324
1325         mask = PUNIT_PWRGT_MASK(pw_idx);
1326         ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
1327
1328         vlv_punit_get(dev_priv);
1329
1330         state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
1331         /*
1332          * We only ever set the power-on and power-gate states, anything
1333          * else is unexpected.
1334          */
1335         drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
1336                     state != PUNIT_PWRGT_PWR_GATE(pw_idx));
1337         if (state == ctrl)
1338                 enabled = true;
1339
1340         /*
1341          * A transient state at this point would mean some unexpected party
1342          * is poking at the power controls too.
1343          */
1344         ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
1345         drm_WARN_ON(&dev_priv->drm, ctrl != state);
1346
1347         vlv_punit_put(dev_priv);
1348
1349         return enabled;
1350 }
1351
1352 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
1353 {
1354         u32 val;
1355
1356         /*
1357          * On driver load, a pipe may be active and driving a DSI display.
1358          * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1359          * (and never recovering) in this case. intel_dsi_post_disable() will
1360          * clear it when we turn off the display.
1361          */
1362         val = intel_de_read(dev_priv, DSPCLK_GATE_D);
1363         val &= DPOUNIT_CLOCK_GATE_DISABLE;
1364         val |= VRHUNIT_CLOCK_GATE_DISABLE;
1365         intel_de_write(dev_priv, DSPCLK_GATE_D, val);
1366
1367         /*
1368          * Disable trickle feed and enable pnd deadline calculation
1369          */
1370         intel_de_write(dev_priv, MI_ARB_VLV,
1371                        MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1372         intel_de_write(dev_priv, CBR1_VLV, 0);
1373
1374         drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
1375         intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
1376                        DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
1377                                          1000));
1378 }
1379
1380 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1381 {
1382         struct intel_encoder *encoder;
1383         enum pipe pipe;
1384
1385         /*
1386          * Enable the CRI clock source so we can get at the
1387          * display and the reference clock for VGA
1388          * hotplug / manual detection. Supposedly DSI also
1389          * needs the ref clock up and running.
1390          *
1391          * CHV DPLL B/C have some issues if VGA mode is enabled.
1392          */
1393         for_each_pipe(dev_priv, pipe) {
1394                 u32 val = intel_de_read(dev_priv, DPLL(pipe));
1395
1396                 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1397                 if (pipe != PIPE_A)
1398                         val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1399
1400                 intel_de_write(dev_priv, DPLL(pipe), val);
1401         }
1402
1403         vlv_init_display_clock_gating(dev_priv);
1404
1405         spin_lock_irq(&dev_priv->irq_lock);
1406         valleyview_enable_display_irqs(dev_priv);
1407         spin_unlock_irq(&dev_priv->irq_lock);
1408
1409         /*
1410          * During driver initialization/resume we can avoid restoring the
1411          * part of the HW/SW state that will be explicitly initialized anyway.
1412          */
1413         if (dev_priv->power_domains.initializing)
1414                 return;
1415
1416         intel_hpd_init(dev_priv);
1417
1418         /* Re-enable the ADPA, if we have one */
1419         for_each_intel_encoder(&dev_priv->drm, encoder) {
1420                 if (encoder->type == INTEL_OUTPUT_ANALOG)
1421                         intel_crt_reset(&encoder->base);
1422         }
1423
1424         intel_vga_redisable_power_on(dev_priv);
1425
1426         intel_pps_unlock_regs_wa(dev_priv);
1427 }
1428
1429 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1430 {
1431         spin_lock_irq(&dev_priv->irq_lock);
1432         valleyview_disable_display_irqs(dev_priv);
1433         spin_unlock_irq(&dev_priv->irq_lock);
1434
1435         /* make sure we're done processing display irqs */
1436         intel_synchronize_irq(dev_priv);
1437
1438         intel_power_sequencer_reset(dev_priv);
1439
1440         /* Prevent us from re-enabling polling by accident in late suspend */
1441         if (!dev_priv->drm.dev->power.is_suspended)
1442                 intel_hpd_poll_init(dev_priv);
1443 }
1444
1445 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1446                                           struct i915_power_well *power_well)
1447 {
1448         vlv_set_power_well(dev_priv, power_well, true);
1449
1450         vlv_display_power_well_init(dev_priv);
1451 }
1452
1453 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1454                                            struct i915_power_well *power_well)
1455 {
1456         vlv_display_power_well_deinit(dev_priv);
1457
1458         vlv_set_power_well(dev_priv, power_well, false);
1459 }
1460
1461 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1462                                            struct i915_power_well *power_well)
1463 {
1464         /* since ref/cri clock was enabled */
1465         udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1466
1467         vlv_set_power_well(dev_priv, power_well, true);
1468
1469         /*
1470          * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1471          *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
1472          *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
1473          *   b. The other bits such as sfr settings / modesel may all
1474          *      be set to 0.
1475          *
1476          * This should only be done on init and resume from S3 with
1477          * both PLLs disabled, or we risk losing DPIO and PLL
1478          * synchronization.
1479          */
1480         intel_de_write(dev_priv, DPIO_CTL,
1481                        intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
1482 }
1483
1484 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1485                                             struct i915_power_well *power_well)
1486 {
1487         enum pipe pipe;
1488
1489         for_each_pipe(dev_priv, pipe)
1490                 assert_pll_disabled(dev_priv, pipe);
1491
1492         /* Assert common reset */
1493         intel_de_write(dev_priv, DPIO_CTL,
1494                        intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
1495
1496         vlv_set_power_well(dev_priv, power_well, false);
1497 }
1498
1499 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
1500
1501 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
1502
1503 static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1504 {
1505         struct i915_power_well *cmn_bc =
1506                 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1507         struct i915_power_well *cmn_d =
1508                 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1509         u32 phy_control = dev_priv->chv_phy_control;
1510         u32 phy_status = 0;
1511         u32 phy_status_mask = 0xffffffff;
1512
1513         /*
1514          * The BIOS can leave the PHY in some weird state
1515          * where it doesn't fully power down some parts.
1516          * Disable the asserts until the PHY has been fully
1517          * reset (ie. the power well has been disabled at
1518          * least once).
1519          */
1520         if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1521                 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1522                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1523                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1524                                      PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1525                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1526                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1527
1528         if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1529                 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1530                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1531                                      PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1532
1533         if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
1534                 phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1535
1536                 /* this assumes override is only used to enable lanes */
1537                 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1538                         phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1539
1540                 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1541                         phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1542
1543                 /* CL1 is on whenever anything is on in either channel */
1544                 if (BITS_SET(phy_control,
1545                              PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1546                              PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1547                         phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1548
1549                 /*
1550                  * The DPLLB check accounts for the pipe B + port A usage
1551                  * with CL2 powered up but all the lanes in the second channel
1552                  * powered down.
1553                  */
1554                 if (BITS_SET(phy_control,
1555                              PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1556                     (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1557                         phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
1558
1559                 if (BITS_SET(phy_control,
1560                              PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1561                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1562                 if (BITS_SET(phy_control,
1563                              PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1564                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1565
1566                 if (BITS_SET(phy_control,
1567                              PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1568                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1569                 if (BITS_SET(phy_control,
1570                              PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1571                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1572         }
1573
1574         if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
1575                 phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1576
1577                 /* this assumes override is only used to enable lanes */
1578                 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1579                         phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1580
1581                 if (BITS_SET(phy_control,
1582                              PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1583                         phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1584
1585                 if (BITS_SET(phy_control,
1586                              PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1587                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1588                 if (BITS_SET(phy_control,
1589                              PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1590                         phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1591         }
1592
1593         phy_status &= phy_status_mask;
1594
1595         /*
1596          * The PHY may be busy with some initial calibration and whatnot,
1597          * so the power state can take a while to actually change.
1598          */
1599         if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
1600                                        phy_status_mask, phy_status, 10))
1601                 drm_err(&dev_priv->drm,
1602                         "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1603                         intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
1604                         phy_status, dev_priv->chv_phy_control);
1605 }
1606
1607 #undef BITS_SET
1608
1609 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1610                                            struct i915_power_well *power_well)
1611 {
1612         enum dpio_phy phy;
1613         enum pipe pipe;
1614         u32 tmp;
1615
1616         drm_WARN_ON_ONCE(&dev_priv->drm,
1617                          power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1618                          power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1619
1620         if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1621                 pipe = PIPE_A;
1622                 phy = DPIO_PHY0;
1623         } else {
1624                 pipe = PIPE_C;
1625                 phy = DPIO_PHY1;
1626         }
1627
1628         /* since ref/cri clock was enabled */
1629         udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1630         vlv_set_power_well(dev_priv, power_well, true);
1631
1632         /* Poll for phypwrgood signal */
1633         if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
1634                                   PHY_POWERGOOD(phy), 1))
1635                 drm_err(&dev_priv->drm, "Display PHY %d did not power up\n",
1636                         phy);
1637
1638         vlv_dpio_get(dev_priv);
1639
1640         /* Enable dynamic power down */
1641         tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1642         tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1643                 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1644         vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1645
1646         if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1647                 tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1648                 tmp |= DPIO_DYNPWRDOWNEN_CH1;
1649                 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1650         } else {
1651                 /*
1652                  * Force the non-existent CL2 off. BXT does this
1653                  * too, so maybe it saves some power even though
1654                  * CL2 doesn't exist?
1655                  */
1656                 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1657                 tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1658                 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1659         }
1660
1661         vlv_dpio_put(dev_priv);
1662
1663         dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1664         intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1665                        dev_priv->chv_phy_control);
1666
1667         drm_dbg_kms(&dev_priv->drm,
1668                     "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1669                     phy, dev_priv->chv_phy_control);
1670
1671         assert_chv_phy_status(dev_priv);
1672 }
1673
1674 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1675                                             struct i915_power_well *power_well)
1676 {
1677         enum dpio_phy phy;
1678
1679         drm_WARN_ON_ONCE(&dev_priv->drm,
1680                          power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1681                          power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1682
1683         if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1684                 phy = DPIO_PHY0;
1685                 assert_pll_disabled(dev_priv, PIPE_A);
1686                 assert_pll_disabled(dev_priv, PIPE_B);
1687         } else {
1688                 phy = DPIO_PHY1;
1689                 assert_pll_disabled(dev_priv, PIPE_C);
1690         }
1691
1692         dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1693         intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1694                        dev_priv->chv_phy_control);
1695
1696         vlv_set_power_well(dev_priv, power_well, false);
1697
1698         drm_dbg_kms(&dev_priv->drm,
1699                     "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1700                     phy, dev_priv->chv_phy_control);
1701
1702         /* PHY is fully reset now, so we can enable the PHY state asserts */
1703         dev_priv->chv_phy_assert[phy] = true;
1704
1705         assert_chv_phy_status(dev_priv);
1706 }
1707
1708 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1709                                      enum dpio_channel ch, bool override, unsigned int mask)
1710 {
1711         enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1712         u32 reg, val, expected, actual;
1713
1714         /*
1715          * The BIOS can leave the PHY in some weird state
1716          * where it doesn't fully power down some parts.
1717          * Disable the asserts until the PHY has been fully
1718          * reset (ie. the power well has been disabled at
1719          * least once).
1720          */
1721         if (!dev_priv->chv_phy_assert[phy])
1722                 return;
1723
1724         if (ch == DPIO_CH0)
1725                 reg = _CHV_CMN_DW0_CH0;
1726         else
1727                 reg = _CHV_CMN_DW6_CH1;
1728
1729         vlv_dpio_get(dev_priv);
1730         val = vlv_dpio_read(dev_priv, pipe, reg);
1731         vlv_dpio_put(dev_priv);
1732
1733         /*
1734          * This assumes !override is only used when the port is disabled.
1735          * All lanes should power down even without the override when
1736          * the port is disabled.
1737          */
1738         if (!override || mask == 0xf) {
1739                 expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1740                 /*
1741                  * If CH1 common lane is not active anymore
1742                  * (eg. for pipe B DPLL) the entire channel will
1743                  * shut down, which causes the common lane registers
1744                  * to read as 0. That means we can't actually check
1745                  * the lane power down status bits, but as the entire
1746                  * register reads as 0 it's a good indication that the
1747                  * channel is indeed entirely powered down.
1748                  */
1749                 if (ch == DPIO_CH1 && val == 0)
1750                         expected = 0;
1751         } else if (mask != 0x0) {
1752                 expected = DPIO_ANYDL_POWERDOWN;
1753         } else {
1754                 expected = 0;
1755         }
1756
1757         if (ch == DPIO_CH0)
1758                 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1759         else
1760                 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1761         actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1762
1763         drm_WARN(&dev_priv->drm, actual != expected,
1764                  "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1765                  !!(actual & DPIO_ALLDL_POWERDOWN),
1766                  !!(actual & DPIO_ANYDL_POWERDOWN),
1767                  !!(expected & DPIO_ALLDL_POWERDOWN),
1768                  !!(expected & DPIO_ANYDL_POWERDOWN),
1769                  reg, val);
1770 }
1771
1772 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1773                           enum dpio_channel ch, bool override)
1774 {
1775         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1776         bool was_override;
1777
1778         mutex_lock(&power_domains->lock);
1779
1780         was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1781
1782         if (override == was_override)
1783                 goto out;
1784
1785         if (override)
1786                 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1787         else
1788                 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1789
1790         intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1791                        dev_priv->chv_phy_control);
1792
1793         drm_dbg_kms(&dev_priv->drm,
1794                     "Power gating DPIO PHY%d CH%d (PHY_CONTROL=0x%08x)\n",
1795                     phy, ch, dev_priv->chv_phy_control);
1796
1797         assert_chv_phy_status(dev_priv);
1798
1799 out:
1800         mutex_unlock(&power_domains->lock);
1801
1802         return was_override;
1803 }
1804
1805 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1806                              bool override, unsigned int mask)
1807 {
1808         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1809         struct i915_power_domains *power_domains = &dev_priv->power_domains;
1810         enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(encoder));
1811         enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
1812
1813         mutex_lock(&power_domains->lock);
1814
1815         dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1816         dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1817
1818         if (override)
1819                 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1820         else
1821                 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1822
1823         intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1824                        dev_priv->chv_phy_control);
1825
1826         drm_dbg_kms(&dev_priv->drm,
1827                     "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1828                     phy, ch, mask, dev_priv->chv_phy_control);
1829
1830         assert_chv_phy_status(dev_priv);
1831
1832         assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1833
1834         mutex_unlock(&power_domains->lock);
1835 }
1836
1837 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1838                                         struct i915_power_well *power_well)
1839 {
1840         enum pipe pipe = PIPE_A;
1841         bool enabled;
1842         u32 state, ctrl;
1843
1844         vlv_punit_get(dev_priv);
1845
1846         state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
1847         /*
1848          * We only ever set the power-on and power-gate states, anything
1849          * else is unexpected.
1850          */
1851         drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
1852                     state != DP_SSS_PWR_GATE(pipe));
1853         enabled = state == DP_SSS_PWR_ON(pipe);
1854
1855         /*
1856          * A transient state at this point would mean some unexpected party
1857          * is poking at the power controls too.
1858          */
1859         ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
1860         drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
1861
1862         vlv_punit_put(dev_priv);
1863
1864         return enabled;
1865 }
1866
1867 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1868                                     struct i915_power_well *power_well,
1869                                     bool enable)
1870 {
1871         enum pipe pipe = PIPE_A;
1872         u32 state;
1873         u32 ctrl;
1874
1875         state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1876
1877         vlv_punit_get(dev_priv);
1878
1879 #define COND \
1880         ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
1881
1882         if (COND)
1883                 goto out;
1884
1885         ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
1886         ctrl &= ~DP_SSC_MASK(pipe);
1887         ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1888         vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
1889
1890         if (wait_for(COND, 100))
1891                 drm_err(&dev_priv->drm,
1892                         "timeout setting power well state %08x (%08x)\n",
1893                         state,
1894                         vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
1895
1896 #undef COND
1897
1898 out:
1899         vlv_punit_put(dev_priv);
1900 }
1901
1902 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
1903                                         struct i915_power_well *power_well)
1904 {
1905         intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1906                        dev_priv->chv_phy_control);
1907 }
1908
1909 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1910                                        struct i915_power_well *power_well)
1911 {
1912         chv_set_pipe_power_well(dev_priv, power_well, true);
1913
1914         vlv_display_power_well_init(dev_priv);
1915 }
1916
1917 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1918                                         struct i915_power_well *power_well)
1919 {
1920         vlv_display_power_well_deinit(dev_priv);
1921
1922         chv_set_pipe_power_well(dev_priv, power_well, false);
1923 }
1924
1925 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
1926 {
1927         return power_domains->async_put_domains[0] |
1928                power_domains->async_put_domains[1];
1929 }
1930
1931 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
1932
1933 static bool
1934 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1935 {
1936         return !WARN_ON(power_domains->async_put_domains[0] &
1937                         power_domains->async_put_domains[1]);
1938 }
1939
1940 static bool
1941 __async_put_domains_state_ok(struct i915_power_domains *power_domains)
1942 {
1943         enum intel_display_power_domain domain;
1944         bool err = false;
1945
1946         err |= !assert_async_put_domain_masks_disjoint(power_domains);
1947         err |= WARN_ON(!!power_domains->async_put_wakeref !=
1948                        !!__async_put_domains_mask(power_domains));
1949
1950         for_each_power_domain(domain, __async_put_domains_mask(power_domains))
1951                 err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
1952
1953         return !err;
1954 }
1955
1956 static void print_power_domains(struct i915_power_domains *power_domains,
1957                                 const char *prefix, u64 mask)
1958 {
1959         struct drm_i915_private *i915 = container_of(power_domains,
1960                                                      struct drm_i915_private,
1961                                                      power_domains);
1962         enum intel_display_power_domain domain;
1963
1964         drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask));
1965         for_each_power_domain(domain, mask)
1966                 drm_dbg(&i915->drm, "%s use_count %d\n",
1967                         intel_display_power_domain_str(domain),
1968                         power_domains->domain_use_count[domain]);
1969 }
1970
1971 static void
1972 print_async_put_domains_state(struct i915_power_domains *power_domains)
1973 {
1974         struct drm_i915_private *i915 = container_of(power_domains,
1975                                                      struct drm_i915_private,
1976                                                      power_domains);
1977
1978         drm_dbg(&i915->drm, "async_put_wakeref %u\n",
1979                 power_domains->async_put_wakeref);
1980
1981         print_power_domains(power_domains, "async_put_domains[0]",
1982                             power_domains->async_put_domains[0]);
1983         print_power_domains(power_domains, "async_put_domains[1]",
1984                             power_domains->async_put_domains[1]);
1985 }
1986
1987 static void
1988 verify_async_put_domains_state(struct i915_power_domains *power_domains)
1989 {
1990         if (!__async_put_domains_state_ok(power_domains))
1991                 print_async_put_domains_state(power_domains);
1992 }
1993
1994 #else
1995
1996 static void
1997 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1998 {
1999 }
2000
2001 static void
2002 verify_async_put_domains_state(struct i915_power_domains *power_domains)
2003 {
2004 }
2005
2006 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
2007
2008 static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
2009 {
2010         assert_async_put_domain_masks_disjoint(power_domains);
2011
2012         return __async_put_domains_mask(power_domains);
2013 }
2014
2015 static void
2016 async_put_domains_clear_domain(struct i915_power_domains *power_domains,
2017                                enum intel_display_power_domain domain)
2018 {
2019         assert_async_put_domain_masks_disjoint(power_domains);
2020
2021         power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
2022         power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
2023 }
2024
2025 static bool
2026 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
2027                                        enum intel_display_power_domain domain)
2028 {
2029         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2030         bool ret = false;
2031
2032         if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
2033                 goto out_verify;
2034
2035         async_put_domains_clear_domain(power_domains, domain);
2036
2037         ret = true;
2038
2039         if (async_put_domains_mask(power_domains))
2040                 goto out_verify;
2041
2042         cancel_delayed_work(&power_domains->async_put_work);
2043         intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
2044                                  fetch_and_zero(&power_domains->async_put_wakeref));
2045 out_verify:
2046         verify_async_put_domains_state(power_domains);
2047
2048         return ret;
2049 }
2050
2051 static void
2052 __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
2053                                  enum intel_display_power_domain domain)
2054 {
2055         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2056         struct i915_power_well *power_well;
2057
2058         if (intel_display_power_grab_async_put_ref(dev_priv, domain))
2059                 return;
2060
2061         for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
2062                 intel_power_well_get(dev_priv, power_well);
2063
2064         power_domains->domain_use_count[domain]++;
2065 }
2066
2067 /**
2068  * intel_display_power_get - grab a power domain reference
2069  * @dev_priv: i915 device instance
2070  * @domain: power domain to reference
2071  *
2072  * This function grabs a power domain reference for @domain and ensures that the
2073  * power domain and all its parents are powered up. Therefore users should only
2074  * grab a reference to the innermost power domain they need.
2075  *
2076  * Any power domain reference obtained by this function must have a symmetric
2077  * call to intel_display_power_put() to release the reference again.
2078  */
2079 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
2080                                         enum intel_display_power_domain domain)
2081 {
2082         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2083         intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2084
2085         mutex_lock(&power_domains->lock);
2086         __intel_display_power_get_domain(dev_priv, domain);
2087         mutex_unlock(&power_domains->lock);
2088
2089         return wakeref;
2090 }
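
/*
 * Illustrative sketch only, not part of the driver: the typical pattern for
 * the API documented above is to keep the returned wakeref and release it
 * with a matching intel_display_power_put(). The helper name and the chosen
 * domain below are hypothetical.
 */
static void __maybe_unused example_power_get_put(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	/* Powers up PIPE_A (and any parent wells) and takes a runtime PM ref. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

	/* ... access PIPE_A registers here ... */

	/* Drop the reference; the well may power down if we were the last user. */
	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
}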
2091
2092 /**
2093  * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
2094  * @dev_priv: i915 device instance
2095  * @domain: power domain to reference
2096  *
2097  * This function grabs a power domain reference for @domain only if the domain
2098  * is already enabled, and keeps it (and its parents) powered up for as long as
2099  * the reference is held. If the domain is not enabled, 0 is returned instead.
2100  *
2101  * Any power domain reference obtained by this function must have a symmetric
2102  * call to intel_display_power_put() to release the reference again.
2103  */
2104 intel_wakeref_t
2105 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
2106                                    enum intel_display_power_domain domain)
2107 {
2108         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2109         intel_wakeref_t wakeref;
2110         bool is_enabled;
2111
2112         wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
2113         if (!wakeref)
2114                 return 0;
2115
2116         mutex_lock(&power_domains->lock);
2117
2118         if (__intel_display_power_is_enabled(dev_priv, domain)) {
2119                 __intel_display_power_get_domain(dev_priv, domain);
2120                 is_enabled = true;
2121         } else {
2122                 is_enabled = false;
2123         }
2124
2125         mutex_unlock(&power_domains->lock);
2126
2127         if (!is_enabled) {
2128                 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2129                 wakeref = 0;
2130         }
2131
2132         return wakeref;
2133 }
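
/*
 * Illustrative sketch only, not part of the driver: the _if_enabled variant
 * above is meant for paths that want to peek at hardware state without
 * forcing a power up. A 0 return means the domain was off and nothing may be
 * touched. The helper name and the chosen domain are hypothetical.
 */
static bool __maybe_unused example_peek_if_powered(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUX_A);
	if (!wakeref)
		return false;	/* domain is off, don't power it up just to look */

	/* ... safe to read AUX A related state here ... */

	intel_display_power_put(dev_priv, POWER_DOMAIN_AUX_A, wakeref);

	return true;
}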
2134
2135 static void
2136 __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
2137                                  enum intel_display_power_domain domain)
2138 {
2139         struct i915_power_domains *power_domains;
2140         struct i915_power_well *power_well;
2141         const char *name = intel_display_power_domain_str(domain);
2142
2143         power_domains = &dev_priv->power_domains;
2144
2145         drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
2146                  "Use count on domain %s is already zero\n",
2147                  name);
2148         drm_WARN(&dev_priv->drm,
2149                  async_put_domains_mask(power_domains) & BIT_ULL(domain),
2150                  "Async disabling of domain %s is pending\n",
2151                  name);
2152
2153         power_domains->domain_use_count[domain]--;
2154
2155         for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
2156                 intel_power_well_put(dev_priv, power_well);
2157 }
2158
2159 static void __intel_display_power_put(struct drm_i915_private *dev_priv,
2160                                       enum intel_display_power_domain domain)
2161 {
2162         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2163
2164         mutex_lock(&power_domains->lock);
2165         __intel_display_power_put_domain(dev_priv, domain);
2166         mutex_unlock(&power_domains->lock);
2167 }
2168
2169 /**
2170  * intel_display_power_put_unchecked - release an unchecked power domain reference
2171  * @dev_priv: i915 device instance
2172  * @domain: power domain to put the reference for
2173  *
2174  * This function drops the power domain reference obtained by
2175  * intel_display_power_get() and might power down the corresponding hardware
2176  * block right away if this is the last reference.
2177  *
2178  * This function exists only for historical reasons and should be avoided in
2179  * new code, as the correctness of its use cannot be checked. Always use
2180  * intel_display_power_put() instead.
2181  */
2182 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
2183                                        enum intel_display_power_domain domain)
2184 {
2185         __intel_display_power_put(dev_priv, domain);
2186         intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
2187 }
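
/*
 * Illustrative sketch only, not part of the driver: the legacy, unchecked
 * style documented above discards the wakeref, so the runtime PM debug
 * machinery cannot verify that gets and puts are balanced. It is shown here
 * only for contrast with the tracked intel_display_power_put() pattern; the
 * helper name and the chosen domain are hypothetical.
 */
static void __maybe_unused example_legacy_unchecked_put(struct drm_i915_private *dev_priv)
{
	/* The returned wakeref is intentionally not kept in this legacy style. */
	intel_display_power_get(dev_priv, POWER_DOMAIN_VGA);

	/* ... touch VGA related registers ... */

	intel_display_power_put_unchecked(dev_priv, POWER_DOMAIN_VGA);
}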
2188
2189 static void
2190 queue_async_put_domains_work(struct i915_power_domains *power_domains,
2191                              intel_wakeref_t wakeref)
2192 {
2193         WARN_ON(power_domains->async_put_wakeref);
2194         power_domains->async_put_wakeref = wakeref;
2195         WARN_ON(!queue_delayed_work(system_unbound_wq,
2196                                     &power_domains->async_put_work,
2197                                     msecs_to_jiffies(100)));
2198 }
2199
2200 static void
2201 release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
2202 {
2203         struct drm_i915_private *dev_priv =
2204                 container_of(power_domains, struct drm_i915_private,
2205                              power_domains);
2206         struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2207         enum intel_display_power_domain domain;
2208         intel_wakeref_t wakeref;
2209
2210         /*
2211          * The caller must already hold a raw wakeref, which we upgrade to a proper
2212          * wakeref to make the state checker happy about the HW access during
2213          * power well disabling.
2214          */
2215         assert_rpm_raw_wakeref_held(rpm);
2216         wakeref = intel_runtime_pm_get(rpm);
2217
2218         for_each_power_domain(domain, mask) {
2219                 /* Clear before put, so put's sanity check is happy. */
2220                 async_put_domains_clear_domain(power_domains, domain);
2221                 __intel_display_power_put_domain(dev_priv, domain);
2222         }
2223
2224         intel_runtime_pm_put(rpm, wakeref);
2225 }
2226
2227 static void
2228 intel_display_power_put_async_work(struct work_struct *work)
2229 {
2230         struct drm_i915_private *dev_priv =
2231                 container_of(work, struct drm_i915_private,
2232                              power_domains.async_put_work.work);
2233         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2234         struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2235         intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
2236         intel_wakeref_t old_work_wakeref = 0;
2237
2238         mutex_lock(&power_domains->lock);
2239
2240         /*
2241          * Bail out if all the domain refs pending to be released were grabbed
2242          * by subsequent gets or a flush_work.
2243          */
2244         old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2245         if (!old_work_wakeref)
2246                 goto out_verify;
2247
2248         release_async_put_domains(power_domains,
2249                                   power_domains->async_put_domains[0]);
2250
2251         /* Requeue the work if more domains were async put meanwhile. */
2252         if (power_domains->async_put_domains[1]) {
2253                 power_domains->async_put_domains[0] =
2254                         fetch_and_zero(&power_domains->async_put_domains[1]);
2255                 queue_async_put_domains_work(power_domains,
2256                                              fetch_and_zero(&new_work_wakeref));
2257         }
2258
2259 out_verify:
2260         verify_async_put_domains_state(power_domains);
2261
2262         mutex_unlock(&power_domains->lock);
2263
2264         if (old_work_wakeref)
2265                 intel_runtime_pm_put_raw(rpm, old_work_wakeref);
2266         if (new_work_wakeref)
2267                 intel_runtime_pm_put_raw(rpm, new_work_wakeref);
2268 }
2269
2270 /**
2271  * intel_display_power_put_async - release a power domain reference asynchronously
2272  * @i915: i915 device instance
2273  * @domain: power domain to put the reference for
2274  * @wakeref: wakeref acquired for the reference that is being released
2275  *
2276  * This function drops the power domain reference obtained by
2277  * intel_display_power_get*() and schedules a work to power down the
2278  * corresponding hardware block if this is the last reference.
2279  */
2280 void __intel_display_power_put_async(struct drm_i915_private *i915,
2281                                      enum intel_display_power_domain domain,
2282                                      intel_wakeref_t wakeref)
2283 {
2284         struct i915_power_domains *power_domains = &i915->power_domains;
2285         struct intel_runtime_pm *rpm = &i915->runtime_pm;
2286         intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
2287
2288         mutex_lock(&power_domains->lock);
2289
2290         if (power_domains->domain_use_count[domain] > 1) {
2291                 __intel_display_power_put_domain(i915, domain);
2292
2293                 goto out_verify;
2294         }
2295
2296         drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
2297
2298         /* Let a pending work requeue itself or queue a new one. */
2299         if (power_domains->async_put_wakeref) {
2300                 power_domains->async_put_domains[1] |= BIT_ULL(domain);
2301         } else {
2302                 power_domains->async_put_domains[0] |= BIT_ULL(domain);
2303                 queue_async_put_domains_work(power_domains,
2304                                              fetch_and_zero(&work_wakeref));
2305         }
2306
2307 out_verify:
2308         verify_async_put_domains_state(power_domains);
2309
2310         mutex_unlock(&power_domains->lock);
2311
2312         if (work_wakeref)
2313                 intel_runtime_pm_put_raw(rpm, work_wakeref);
2314
2315         intel_runtime_pm_put(rpm, wakeref);
2316 }
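
/*
 * Illustrative sketch only, not part of the driver: dropping a reference
 * through the async path defers the actual power down to the delayed work
 * above, which helps hot paths that are likely to re-acquire the same domain
 * shortly. The intel_display_power_put_async() call is assumed to be the
 * inline wrapper from intel_display_power.h that forwards to
 * __intel_display_power_put_async(); the helper name and domain choice are
 * hypothetical.
 */
static void __maybe_unused example_put_async_then_flush(struct drm_i915_private *i915,
							intel_wakeref_t wakeref)
{
	/* Queue the power down instead of performing it synchronously. */
	intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wakeref);

	/* If the power down must be committed now, flush the pending work. */
	intel_display_power_flush_work(i915);
}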
2317
2318 /**
2319  * intel_display_power_flush_work - flushes the async display power disabling work
2320  * @i915: i915 device instance
2321  *
2322  * Flushes any pending work that was scheduled by a preceding
2323  * intel_display_power_put_async() call, completing the disabling of the
2324  * corresponding power domains.
2325  *
2326  * Note that the work handler function may still be running after this
2327  * function returns; to ensure that the work handler isn't running use
2328  * intel_display_power_flush_work_sync() instead.
2329  */
2330 void intel_display_power_flush_work(struct drm_i915_private *i915)
2331 {
2332         struct i915_power_domains *power_domains = &i915->power_domains;
2333         intel_wakeref_t work_wakeref;
2334
2335         mutex_lock(&power_domains->lock);
2336
2337         work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2338         if (!work_wakeref)
2339                 goto out_verify;
2340
2341         release_async_put_domains(power_domains,
2342                                   async_put_domains_mask(power_domains));
2343         cancel_delayed_work(&power_domains->async_put_work);
2344
2345 out_verify:
2346         verify_async_put_domains_state(power_domains);
2347
2348         mutex_unlock(&power_domains->lock);
2349
2350         if (work_wakeref)
2351                 intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
2352 }
2353
2354 /**
2355  * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
2356  * @i915: i915 device instance
2357  *
2358  * Like intel_display_power_flush_work(), but also ensures that the work
2359  * handler function is no longer running when this function returns.
2360  */
2361 static void
2362 intel_display_power_flush_work_sync(struct drm_i915_private *i915)
2363 {
2364         struct i915_power_domains *power_domains = &i915->power_domains;
2365
2366         intel_display_power_flush_work(i915);
2367         cancel_delayed_work_sync(&power_domains->async_put_work);
2368
2369         verify_async_put_domains_state(power_domains);
2370
2371         drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
2372 }
2373
2374 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2375 /**
2376  * intel_display_power_put - release a power domain reference
2377  * @dev_priv: i915 device instance
2378  * @domain: power domain to reference
2379  * @domain: power domain to put the reference for
2380  *
2381  * This function drops the power domain reference obtained by
2382  * intel_display_power_get() and might power down the corresponding hardware
2383  * block right away if this is the last reference.
2384  */
2385 void intel_display_power_put(struct drm_i915_private *dev_priv,
2386                              enum intel_display_power_domain domain,
2387                              intel_wakeref_t wakeref)
2388 {
2389         __intel_display_power_put(dev_priv, domain);
2390         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2391 }
2392 #endif
2393
2394 #define I830_PIPES_POWER_DOMAINS (              \
2395         BIT_ULL(POWER_DOMAIN_PIPE_A) |          \
2396         BIT_ULL(POWER_DOMAIN_PIPE_B) |          \
2397         BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |     \
2398         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
2399         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |    \
2400         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |    \
2401         BIT_ULL(POWER_DOMAIN_INIT))
2402
2403 #define VLV_DISPLAY_POWER_DOMAINS (             \
2404         BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |    \
2405         BIT_ULL(POWER_DOMAIN_PIPE_A) |          \
2406         BIT_ULL(POWER_DOMAIN_PIPE_B) |          \
2407         BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |     \
2408         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
2409         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |    \
2410         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |    \
2411         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
2412         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2413         BIT_ULL(POWER_DOMAIN_PORT_DSI) |                \
2414         BIT_ULL(POWER_DOMAIN_PORT_CRT) |                \
2415         BIT_ULL(POWER_DOMAIN_VGA) |                     \
2416         BIT_ULL(POWER_DOMAIN_AUDIO) |           \
2417         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
2418         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
2419         BIT_ULL(POWER_DOMAIN_GMBUS) |           \
2420         BIT_ULL(POWER_DOMAIN_INIT))
2421
2422 #define VLV_DPIO_CMN_BC_POWER_DOMAINS (         \
2423         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
2424         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2425         BIT_ULL(POWER_DOMAIN_PORT_CRT) |                \
2426         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
2427         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
2428         BIT_ULL(POWER_DOMAIN_INIT))
2429
2430 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (  \
2431         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
2432         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
2433         BIT_ULL(POWER_DOMAIN_INIT))
2434
2435 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (  \
2436         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
2437         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
2438         BIT_ULL(POWER_DOMAIN_INIT))
2439
2440 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (  \
2441         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2442         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
2443         BIT_ULL(POWER_DOMAIN_INIT))
2444
2445 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (  \
2446         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2447         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
2448         BIT_ULL(POWER_DOMAIN_INIT))
2449
2450 #define CHV_DISPLAY_POWER_DOMAINS (             \
2451         BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |    \
2452         BIT_ULL(POWER_DOMAIN_PIPE_A) |          \
2453         BIT_ULL(POWER_DOMAIN_PIPE_B) |          \
2454         BIT_ULL(POWER_DOMAIN_PIPE_C) |          \
2455         BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |     \
2456         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
2457         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |     \
2458         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |    \
2459         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |    \
2460         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |    \
2461         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
2462         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2463         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
2464         BIT_ULL(POWER_DOMAIN_PORT_DSI) |                \
2465         BIT_ULL(POWER_DOMAIN_VGA) |                     \
2466         BIT_ULL(POWER_DOMAIN_AUDIO) |           \
2467         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
2468         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
2469         BIT_ULL(POWER_DOMAIN_AUX_D) |           \
2470         BIT_ULL(POWER_DOMAIN_GMBUS) |           \
2471         BIT_ULL(POWER_DOMAIN_INIT))
2472
2473 #define CHV_DPIO_CMN_BC_POWER_DOMAINS (         \
2474         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
2475         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2476         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
2477         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
2478         BIT_ULL(POWER_DOMAIN_INIT))
2479
2480 #define CHV_DPIO_CMN_D_POWER_DOMAINS (          \
2481         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
2482         BIT_ULL(POWER_DOMAIN_AUX_D) |           \
2483         BIT_ULL(POWER_DOMAIN_INIT))
2484
2485 #define HSW_DISPLAY_POWER_DOMAINS (                     \
2486         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2487         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2488         BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |             \
2489         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2490         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2491         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2492         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2493         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2494         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2495         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2496         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
2497         BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */    \
2498         BIT_ULL(POWER_DOMAIN_VGA) |                             \
2499         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2500         BIT_ULL(POWER_DOMAIN_INIT))
2501
2502 #define BDW_DISPLAY_POWER_DOMAINS (                     \
2503         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2504         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2505         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2506         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2507         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2508         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2509         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2510         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2511         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2512         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
2513         BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */    \
2514         BIT_ULL(POWER_DOMAIN_VGA) |                             \
2515         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2516         BIT_ULL(POWER_DOMAIN_INIT))
2517
2518 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (         \
2519         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2520         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2521         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2522         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2523         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2524         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2525         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2526         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2527         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2528         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
2529         BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |                \
2530         BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2531         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2532         BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
2533         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2534         BIT_ULL(POWER_DOMAIN_VGA) |                             \
2535         BIT_ULL(POWER_DOMAIN_INIT))
2536 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (          \
2537         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |           \
2538         BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |           \
2539         BIT_ULL(POWER_DOMAIN_INIT))
2540 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (            \
2541         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |           \
2542         BIT_ULL(POWER_DOMAIN_INIT))
2543 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (            \
2544         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |           \
2545         BIT_ULL(POWER_DOMAIN_INIT))
2546 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (            \
2547         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |           \
2548         BIT_ULL(POWER_DOMAIN_INIT))
2549 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (              \
2550         SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
2551         BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
2552         BIT_ULL(POWER_DOMAIN_MODESET) |                 \
2553         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2554         BIT_ULL(POWER_DOMAIN_INIT))
2555
2556 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (         \
2557         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2558         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2559         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2560         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2561         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2562         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2563         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2564         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2565         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2566         BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
2567         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2568         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2569         BIT_ULL(POWER_DOMAIN_VGA) |                             \
2570         BIT_ULL(POWER_DOMAIN_INIT))
2571 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (              \
2572         BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
2573         BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
2574         BIT_ULL(POWER_DOMAIN_MODESET) |                 \
2575         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2576         BIT_ULL(POWER_DOMAIN_GMBUS) |                   \
2577         BIT_ULL(POWER_DOMAIN_INIT))
2578 #define BXT_DPIO_CMN_A_POWER_DOMAINS (                  \
2579         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |                \
2580         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2581         BIT_ULL(POWER_DOMAIN_INIT))
2582 #define BXT_DPIO_CMN_BC_POWER_DOMAINS (                 \
2583         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2584         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2585         BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
2586         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2587         BIT_ULL(POWER_DOMAIN_INIT))
2588
2589 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (         \
2590         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2591         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2592         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2593         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2594         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2595         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2596         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2597         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2598         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2599         BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2600         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2601         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2602         BIT_ULL(POWER_DOMAIN_VGA) |                             \
2603         BIT_ULL(POWER_DOMAIN_INIT))
2604 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (            \
2605         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2606 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (            \
2607         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2608 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (            \
2609         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2610 #define GLK_DPIO_CMN_A_POWER_DOMAINS (                  \
2611         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |                \
2612         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2613         BIT_ULL(POWER_DOMAIN_INIT))
2614 #define GLK_DPIO_CMN_B_POWER_DOMAINS (                  \
2615         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2616         BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
2617         BIT_ULL(POWER_DOMAIN_INIT))
2618 #define GLK_DPIO_CMN_C_POWER_DOMAINS (                  \
2619         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2620         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2621         BIT_ULL(POWER_DOMAIN_INIT))
2622 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS (               \
2623         BIT_ULL(POWER_DOMAIN_AUX_A) |           \
2624         BIT_ULL(POWER_DOMAIN_AUX_IO_A) |                \
2625         BIT_ULL(POWER_DOMAIN_INIT))
2626 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS (               \
2627         BIT_ULL(POWER_DOMAIN_AUX_B) |           \
2628         BIT_ULL(POWER_DOMAIN_INIT))
2629 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS (               \
2630         BIT_ULL(POWER_DOMAIN_AUX_C) |           \
2631         BIT_ULL(POWER_DOMAIN_INIT))
2632 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (              \
2633         GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
2634         BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
2635         BIT_ULL(POWER_DOMAIN_MODESET) |                 \
2636         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2637         BIT_ULL(POWER_DOMAIN_GMBUS) |                   \
2638         BIT_ULL(POWER_DOMAIN_INIT))
2639
2640 #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (         \
2641         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2642         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2643         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2644         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2645         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2646         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
2647         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
2648         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
2649         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
2650         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
2651         BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |                \
2652         BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
2653         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2654         BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
2655         BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
2656         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2657         BIT_ULL(POWER_DOMAIN_VGA) |                             \
2658         BIT_ULL(POWER_DOMAIN_INIT))
2659 #define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (            \
2660         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |           \
2661         BIT_ULL(POWER_DOMAIN_INIT))
2662 #define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (            \
2663         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |           \
2664         BIT_ULL(POWER_DOMAIN_INIT))
2665 #define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (            \
2666         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |           \
2667         BIT_ULL(POWER_DOMAIN_INIT))
2668 #define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (            \
2669         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |           \
2670         BIT_ULL(POWER_DOMAIN_INIT))
2671 #define CNL_DISPLAY_AUX_A_POWER_DOMAINS (               \
2672         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2673         BIT_ULL(POWER_DOMAIN_AUX_IO_A) |                \
2674         BIT_ULL(POWER_DOMAIN_INIT))
2675 #define CNL_DISPLAY_AUX_B_POWER_DOMAINS (               \
2676         BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
2677         BIT_ULL(POWER_DOMAIN_INIT))
2678 #define CNL_DISPLAY_AUX_C_POWER_DOMAINS (               \
2679         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2680         BIT_ULL(POWER_DOMAIN_INIT))
2681 #define CNL_DISPLAY_AUX_D_POWER_DOMAINS (               \
2682         BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
2683         BIT_ULL(POWER_DOMAIN_INIT))
2684 #define CNL_DISPLAY_AUX_F_POWER_DOMAINS (               \
2685         BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
2686         BIT_ULL(POWER_DOMAIN_INIT))
2687 #define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (            \
2688         BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |           \
2689         BIT_ULL(POWER_DOMAIN_INIT))
2690 #define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (              \
2691         CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
2692         BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
2693         BIT_ULL(POWER_DOMAIN_MODESET) |                 \
2694         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2695         BIT_ULL(POWER_DOMAIN_INIT))
2696
2697 /*
2698  * ICL PW_0/PG_0 domains (HW/DMC control):
2699  * - PCI
2700  * - clocks except port PLL
2701  * - central power except FBC
2702  * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2703  * ICL PW_1/PG_1 domains (HW/DMC control):
2704  * - DBUF function
2705  * - PIPE_A and its planes, except VGA
2706  * - transcoder EDP + PSR
2707  * - transcoder DSI
2708  * - DDI_A
2709  * - FBC
2710  */
2711 #define ICL_PW_4_POWER_DOMAINS (                        \
2712         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2713         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |     \
2714         BIT_ULL(POWER_DOMAIN_INIT))
2715         /* VDSC/joining */
2716 #define ICL_PW_3_POWER_DOMAINS (                        \
2717         ICL_PW_4_POWER_DOMAINS |                        \
2718         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2719         BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
2720         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2721         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2722         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
2723         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
2724         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
2725         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
2726         BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |        \
2727         BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |        \
2728         BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
2729         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2730         BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
2731         BIT_ULL(POWER_DOMAIN_AUX_E) |                   \
2732         BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
2733         BIT_ULL(POWER_DOMAIN_AUX_C_TBT) |               \
2734         BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |               \
2735         BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |               \
2736         BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |               \
2737         BIT_ULL(POWER_DOMAIN_VGA) |                     \
2738         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2739         BIT_ULL(POWER_DOMAIN_INIT))
2740         /*
2741          * - transcoder WD
2742          * - KVMR (HW control)
2743          */
2744 #define ICL_PW_2_POWER_DOMAINS (                        \
2745         ICL_PW_3_POWER_DOMAINS |                        \
2746         BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |             \
2747         BIT_ULL(POWER_DOMAIN_INIT))
2748         /*
2749          * - KVMR (HW control)
2750          */
2751 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (              \
2752         ICL_PW_2_POWER_DOMAINS |                        \
2753         BIT_ULL(POWER_DOMAIN_MODESET) |                 \
2754         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2755         BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) |                     \
2756         BIT_ULL(POWER_DOMAIN_INIT))
2757
2758 #define ICL_DDI_IO_A_POWER_DOMAINS (                    \
2759         BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2760 #define ICL_DDI_IO_B_POWER_DOMAINS (                    \
2761         BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2762 #define ICL_DDI_IO_C_POWER_DOMAINS (                    \
2763         BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2764 #define ICL_DDI_IO_D_POWER_DOMAINS (                    \
2765         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2766 #define ICL_DDI_IO_E_POWER_DOMAINS (                    \
2767         BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2768 #define ICL_DDI_IO_F_POWER_DOMAINS (                    \
2769         BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2770
2771 #define ICL_AUX_A_IO_POWER_DOMAINS (                    \
2772         BIT_ULL(POWER_DOMAIN_AUX_IO_A) |                \
2773         BIT_ULL(POWER_DOMAIN_AUX_A))
2774 #define ICL_AUX_B_IO_POWER_DOMAINS (                    \
2775         BIT_ULL(POWER_DOMAIN_AUX_B))
2776 #define ICL_AUX_C_TC1_IO_POWER_DOMAINS (                \
2777         BIT_ULL(POWER_DOMAIN_AUX_C))
2778 #define ICL_AUX_D_TC2_IO_POWER_DOMAINS (                \
2779         BIT_ULL(POWER_DOMAIN_AUX_D))
2780 #define ICL_AUX_E_TC3_IO_POWER_DOMAINS (                \
2781         BIT_ULL(POWER_DOMAIN_AUX_E))
2782 #define ICL_AUX_F_TC4_IO_POWER_DOMAINS (                \
2783         BIT_ULL(POWER_DOMAIN_AUX_F))
2784 #define ICL_AUX_C_TBT1_IO_POWER_DOMAINS (               \
2785         BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
2786 #define ICL_AUX_D_TBT2_IO_POWER_DOMAINS (               \
2787         BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2788 #define ICL_AUX_E_TBT3_IO_POWER_DOMAINS (               \
2789         BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2790 #define ICL_AUX_F_TBT4_IO_POWER_DOMAINS (               \
2791         BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2792
2793 #define TGL_PW_5_POWER_DOMAINS (                        \
2794         BIT_ULL(POWER_DOMAIN_PIPE_D) |                  \
2795         BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |            \
2796         BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) |     \
2797         BIT_ULL(POWER_DOMAIN_INIT))
2798
2799 #define TGL_PW_4_POWER_DOMAINS (                        \
2800         TGL_PW_5_POWER_DOMAINS |                        \
2801         BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
2802         BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
2803         BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |     \
2804         BIT_ULL(POWER_DOMAIN_INIT))
2805
2806 #define TGL_PW_3_POWER_DOMAINS (                        \
2807         TGL_PW_4_POWER_DOMAINS |                        \
2808         BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
2809         BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
2810         BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
2811         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
2812         BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |        \
2813         BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |        \
2814         BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) |        \
2815         BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) |        \
2816         BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) |        \
2817         BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
2818         BIT_ULL(POWER_DOMAIN_AUX_E) |                   \
2819         BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
2820         BIT_ULL(POWER_DOMAIN_AUX_G) |                   \
2821         BIT_ULL(POWER_DOMAIN_AUX_H) |                   \
2822         BIT_ULL(POWER_DOMAIN_AUX_I) |                   \
2823         BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |               \
2824         BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |               \
2825         BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |               \
2826         BIT_ULL(POWER_DOMAIN_AUX_G_TBT) |               \
2827         BIT_ULL(POWER_DOMAIN_AUX_H_TBT) |               \
2828         BIT_ULL(POWER_DOMAIN_AUX_I_TBT) |               \
2829         BIT_ULL(POWER_DOMAIN_VGA) |                     \
2830         BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
2831         BIT_ULL(POWER_DOMAIN_INIT))
2832
2833 #define TGL_PW_2_POWER_DOMAINS (                        \
2834         TGL_PW_3_POWER_DOMAINS |                        \
2835         BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |     \
2836         BIT_ULL(POWER_DOMAIN_INIT))
2837
2838 #define TGL_DISPLAY_DC_OFF_POWER_DOMAINS (              \
2839         TGL_PW_3_POWER_DOMAINS |                        \
2840         BIT_ULL(POWER_DOMAIN_MODESET) |                 \
2841         BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
2842         BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
2843         BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
2844         BIT_ULL(POWER_DOMAIN_INIT))
2845
2846 #define TGL_DDI_IO_D_TC1_POWER_DOMAINS (        \
2847         BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2848 #define TGL_DDI_IO_E_TC2_POWER_DOMAINS (        \
2849         BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2850 #define TGL_DDI_IO_F_TC3_POWER_DOMAINS (        \
2851         BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2852 #define TGL_DDI_IO_G_TC4_POWER_DOMAINS (        \
2853         BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
2854 #define TGL_DDI_IO_H_TC5_POWER_DOMAINS (        \
2855         BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
2856 #define TGL_DDI_IO_I_TC6_POWER_DOMAINS (        \
2857         BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))
2858
2859 #define TGL_AUX_A_IO_POWER_DOMAINS (            \
2860         BIT_ULL(POWER_DOMAIN_AUX_IO_A) |        \
2861         BIT_ULL(POWER_DOMAIN_AUX_A))
2862 #define TGL_AUX_B_IO_POWER_DOMAINS (            \
2863         BIT_ULL(POWER_DOMAIN_AUX_B))
2864 #define TGL_AUX_C_IO_POWER_DOMAINS (            \
2865         BIT_ULL(POWER_DOMAIN_AUX_C))
2866 #define TGL_AUX_D_TC1_IO_POWER_DOMAINS (        \
2867         BIT_ULL(POWER_DOMAIN_AUX_D))
2868 #define TGL_AUX_E_TC2_IO_POWER_DOMAINS (        \
2869         BIT_ULL(POWER_DOMAIN_AUX_E))
2870 #define TGL_AUX_F_TC3_IO_POWER_DOMAINS (        \
2871         BIT_ULL(POWER_DOMAIN_AUX_F))
2872 #define TGL_AUX_G_TC4_IO_POWER_DOMAINS (        \
2873         BIT_ULL(POWER_DOMAIN_AUX_G))
2874 #define TGL_AUX_H_TC5_IO_POWER_DOMAINS (        \
2875         BIT_ULL(POWER_DOMAIN_AUX_H))
2876 #define TGL_AUX_I_TC6_IO_POWER_DOMAINS (        \
2877         BIT_ULL(POWER_DOMAIN_AUX_I))
2878 #define TGL_AUX_D_TBT1_IO_POWER_DOMAINS (       \
2879         BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2880 #define TGL_AUX_E_TBT2_IO_POWER_DOMAINS (       \
2881         BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2882 #define TGL_AUX_F_TBT3_IO_POWER_DOMAINS (       \
2883         BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2884 #define TGL_AUX_G_TBT4_IO_POWER_DOMAINS (       \
2885         BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
2886 #define TGL_AUX_H_TBT5_IO_POWER_DOMAINS (       \
2887         BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
2888 #define TGL_AUX_I_TBT6_IO_POWER_DOMAINS (       \
2889         BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
2890
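/*
 * The Type-C port AUX and TBT AUX domains are also included in the domain
 * mask of the virtual "TC cold off" well below, so taking a reference on any
 * of those domains keeps TCCOLD blocked via PCODE (see tgl_tc_cold_request()).
 */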
2891 #define TGL_TC_COLD_OFF_POWER_DOMAINS (         \
2892         BIT_ULL(POWER_DOMAIN_AUX_D)     |       \
2893         BIT_ULL(POWER_DOMAIN_AUX_E)     |       \
2894         BIT_ULL(POWER_DOMAIN_AUX_F)     |       \
2895         BIT_ULL(POWER_DOMAIN_AUX_G)     |       \
2896         BIT_ULL(POWER_DOMAIN_AUX_H)     |       \
2897         BIT_ULL(POWER_DOMAIN_AUX_I)     |       \
2898         BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |       \
2899         BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |       \
2900         BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |       \
2901         BIT_ULL(POWER_DOMAIN_AUX_G_TBT) |       \
2902         BIT_ULL(POWER_DOMAIN_AUX_H_TBT) |       \
2903         BIT_ULL(POWER_DOMAIN_AUX_I_TBT) |       \
2904         BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
2905
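/*
 * What follows are the per-platform power well ops vtables and descriptor
 * tables. The descriptors are listed in enable order: the power domain code
 * walks a platform's list forwards when enabling and backwards when
 * disabling, so a well must appear after the wells it depends on.
 */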
2906 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
2907         .sync_hw = i9xx_power_well_sync_hw_noop,
2908         .enable = i9xx_always_on_power_well_noop,
2909         .disable = i9xx_always_on_power_well_noop,
2910         .is_enabled = i9xx_always_on_power_well_enabled,
2911 };
2912
2913 static const struct i915_power_well_ops chv_pipe_power_well_ops = {
2914         .sync_hw = chv_pipe_power_well_sync_hw,
2915         .enable = chv_pipe_power_well_enable,
2916         .disable = chv_pipe_power_well_disable,
2917         .is_enabled = chv_pipe_power_well_enabled,
2918 };
2919
2920 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
2921         .sync_hw = i9xx_power_well_sync_hw_noop,
2922         .enable = chv_dpio_cmn_power_well_enable,
2923         .disable = chv_dpio_cmn_power_well_disable,
2924         .is_enabled = vlv_power_well_enabled,
2925 };
2926
2927 static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
2928         {
2929                 .name = "always-on",
2930                 .always_on = true,
2931                 .domains = POWER_DOMAIN_MASK,
2932                 .ops = &i9xx_always_on_power_well_ops,
2933                 .id = DISP_PW_ID_NONE,
2934         },
2935 };
2936
2937 static const struct i915_power_well_ops i830_pipes_power_well_ops = {
2938         .sync_hw = i830_pipes_power_well_sync_hw,
2939         .enable = i830_pipes_power_well_enable,
2940         .disable = i830_pipes_power_well_disable,
2941         .is_enabled = i830_pipes_power_well_enabled,
2942 };
2943
2944 static const struct i915_power_well_desc i830_power_wells[] = {
2945         {
2946                 .name = "always-on",
2947                 .always_on = true,
2948                 .domains = POWER_DOMAIN_MASK,
2949                 .ops = &i9xx_always_on_power_well_ops,
2950                 .id = DISP_PW_ID_NONE,
2951         },
2952         {
2953                 .name = "pipes",
2954                 .domains = I830_PIPES_POWER_DOMAINS,
2955                 .ops = &i830_pipes_power_well_ops,
2956                 .id = DISP_PW_ID_NONE,
2957         },
2958 };
2959
2960 static const struct i915_power_well_ops hsw_power_well_ops = {
2961         .sync_hw = hsw_power_well_sync_hw,
2962         .enable = hsw_power_well_enable,
2963         .disable = hsw_power_well_disable,
2964         .is_enabled = hsw_power_well_enabled,
2965 };
2966
2967 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
2968         .sync_hw = i9xx_power_well_sync_hw_noop,
2969         .enable = gen9_dc_off_power_well_enable,
2970         .disable = gen9_dc_off_power_well_disable,
2971         .is_enabled = gen9_dc_off_power_well_enabled,
2972 };
2973
2974 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
2975         .sync_hw = i9xx_power_well_sync_hw_noop,
2976         .enable = bxt_dpio_cmn_power_well_enable,
2977         .disable = bxt_dpio_cmn_power_well_disable,
2978         .is_enabled = bxt_dpio_cmn_power_well_enabled,
2979 };
2980
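/*
 * HSW+ power wells are requested through four separate control registers
 * (BIOS, driver, KVMr and debug); a well stays powered as long as any of the
 * requesters keeps its request bit set.
 */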
2981 static const struct i915_power_well_regs hsw_power_well_regs = {
2982         .bios   = HSW_PWR_WELL_CTL1,
2983         .driver = HSW_PWR_WELL_CTL2,
2984         .kvmr   = HSW_PWR_WELL_CTL3,
2985         .debug  = HSW_PWR_WELL_CTL4,
2986 };
2987
2988 static const struct i915_power_well_desc hsw_power_wells[] = {
2989         {
2990                 .name = "always-on",
2991                 .always_on = true,
2992                 .domains = POWER_DOMAIN_MASK,
2993                 .ops = &i9xx_always_on_power_well_ops,
2994                 .id = DISP_PW_ID_NONE,
2995         },
2996         {
2997                 .name = "display",
2998                 .domains = HSW_DISPLAY_POWER_DOMAINS,
2999                 .ops = &hsw_power_well_ops,
3000                 .id = HSW_DISP_PW_GLOBAL,
3001                 {
3002                         .hsw.regs = &hsw_power_well_regs,
3003                         .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
3004                         .hsw.has_vga = true,
3005                 },
3006         },
3007 };
3008
3009 static const struct i915_power_well_desc bdw_power_wells[] = {
3010         {
3011                 .name = "always-on",
3012                 .always_on = true,
3013                 .domains = POWER_DOMAIN_MASK,
3014                 .ops = &i9xx_always_on_power_well_ops,
3015                 .id = DISP_PW_ID_NONE,
3016         },
3017         {
3018                 .name = "display",
3019                 .domains = BDW_DISPLAY_POWER_DOMAINS,
3020                 .ops = &hsw_power_well_ops,
3021                 .id = HSW_DISP_PW_GLOBAL,
3022                 {
3023                         .hsw.regs = &hsw_power_well_regs,
3024                         .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
3025                         .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3026                         .hsw.has_vga = true,
3027                 },
3028         },
3029 };
3030
3031 static const struct i915_power_well_ops vlv_display_power_well_ops = {
3032         .sync_hw = i9xx_power_well_sync_hw_noop,
3033         .enable = vlv_display_power_well_enable,
3034         .disable = vlv_display_power_well_disable,
3035         .is_enabled = vlv_power_well_enabled,
3036 };
3037
3038 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
3039         .sync_hw = i9xx_power_well_sync_hw_noop,
3040         .enable = vlv_dpio_cmn_power_well_enable,
3041         .disable = vlv_dpio_cmn_power_well_disable,
3042         .is_enabled = vlv_power_well_enabled,
3043 };
3044
3045 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
3046         .sync_hw = i9xx_power_well_sync_hw_noop,
3047         .enable = vlv_power_well_enable,
3048         .disable = vlv_power_well_disable,
3049         .is_enabled = vlv_power_well_enabled,
3050 };
3051
3052 static const struct i915_power_well_desc vlv_power_wells[] = {
3053         {
3054                 .name = "always-on",
3055                 .always_on = true,
3056                 .domains = POWER_DOMAIN_MASK,
3057                 .ops = &i9xx_always_on_power_well_ops,
3058                 .id = DISP_PW_ID_NONE,
3059         },
3060         {
3061                 .name = "display",
3062                 .domains = VLV_DISPLAY_POWER_DOMAINS,
3063                 .ops = &vlv_display_power_well_ops,
3064                 .id = VLV_DISP_PW_DISP2D,
3065                 {
3066                         .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
3067                 },
3068         },
3069         {
3070                 .name = "dpio-tx-b-01",
3071                 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3072                            VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3073                            VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3074                            VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3075                 .ops = &vlv_dpio_power_well_ops,
3076                 .id = DISP_PW_ID_NONE,
3077                 {
3078                         .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
3079                 },
3080         },
3081         {
3082                 .name = "dpio-tx-b-23",
3083                 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3084                            VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3085                            VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3086                            VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3087                 .ops = &vlv_dpio_power_well_ops,
3088                 .id = DISP_PW_ID_NONE,
3089                 {
3090                         .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
3091                 },
3092         },
3093         {
3094                 .name = "dpio-tx-c-01",
3095                 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3096                            VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3097                            VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3098                            VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3099                 .ops = &vlv_dpio_power_well_ops,
3100                 .id = DISP_PW_ID_NONE,
3101                 {
3102                         .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
3103                 },
3104         },
3105         {
3106                 .name = "dpio-tx-c-23",
3107                 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3108                            VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3109                            VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3110                            VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3111                 .ops = &vlv_dpio_power_well_ops,
3112                 .id = DISP_PW_ID_NONE,
3113                 {
3114                         .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
3115                 },
3116         },
3117         {
3118                 .name = "dpio-common",
3119                 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
3120                 .ops = &vlv_dpio_cmn_power_well_ops,
3121                 .id = VLV_DISP_PW_DPIO_CMN_BC,
3122                 {
3123                         .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3124                 },
3125         },
3126 };
3127
3128 static const struct i915_power_well_desc chv_power_wells[] = {
3129         {
3130                 .name = "always-on",
3131                 .always_on = true,
3132                 .domains = POWER_DOMAIN_MASK,
3133                 .ops = &i9xx_always_on_power_well_ops,
3134                 .id = DISP_PW_ID_NONE,
3135         },
3136         {
3137                 .name = "display",
3138                 /*
3139                  * Pipe A power well is the new disp2d well. Pipe B and C
3140                  * power wells don't actually exist. Pipe A power well is
3141                  * required for any pipe to work.
3142                  */
3143                 .domains = CHV_DISPLAY_POWER_DOMAINS,
3144                 .ops = &chv_pipe_power_well_ops,
3145                 .id = DISP_PW_ID_NONE,
3146         },
3147         {
3148                 .name = "dpio-common-bc",
3149                 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
3150                 .ops = &chv_dpio_cmn_power_well_ops,
3151                 .id = VLV_DISP_PW_DPIO_CMN_BC,
3152                 {
3153                         .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3154                 },
3155         },
3156         {
3157                 .name = "dpio-common-d",
3158                 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
3159                 .ops = &chv_dpio_cmn_power_well_ops,
3160                 .id = CHV_DISP_PW_DPIO_CMN_D,
3161                 {
3162                         .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
3163                 },
3164         },
3165 };
3166
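/*
 * Look up a power well by its ID and report whether the hardware currently
 * has it enabled. This only queries the state; it does not take a reference
 * on the well.
 */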
3167 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
3168                                          enum i915_power_well_id power_well_id)
3169 {
3170         struct i915_power_well *power_well;
3171         bool ret;
3172
3173         power_well = lookup_power_well(dev_priv, power_well_id);
3174         ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
3175
3176         return ret;
3177 }
3178
3179 static const struct i915_power_well_desc skl_power_wells[] = {
3180         {
3181                 .name = "always-on",
3182                 .always_on = true,
3183                 .domains = POWER_DOMAIN_MASK,
3184                 .ops = &i9xx_always_on_power_well_ops,
3185                 .id = DISP_PW_ID_NONE,
3186         },
3187         {
3188                 .name = "power well 1",
3189                 /* Handled by the DMC firmware */
3190                 .always_on = true,
3191                 .domains = 0,
3192                 .ops = &hsw_power_well_ops,
3193                 .id = SKL_DISP_PW_1,
3194                 {
3195                         .hsw.regs = &hsw_power_well_regs,
3196                         .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3197                         .hsw.has_fuses = true,
3198                 },
3199         },
3200         {
3201                 .name = "MISC IO power well",
3202                 /* Handled by the DMC firmware */
3203                 .always_on = true,
3204                 .domains = 0,
3205                 .ops = &hsw_power_well_ops,
3206                 .id = SKL_DISP_PW_MISC_IO,
3207                 {
3208                         .hsw.regs = &hsw_power_well_regs,
3209                         .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
3210                 },
3211         },
3212         {
3213                 .name = "DC off",
3214                 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
3215                 .ops = &gen9_dc_off_power_well_ops,
3216                 .id = SKL_DISP_DC_OFF,
3217         },
3218         {
3219                 .name = "power well 2",
3220                 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3221                 .ops = &hsw_power_well_ops,
3222                 .id = SKL_DISP_PW_2,
3223                 {
3224                         .hsw.regs = &hsw_power_well_regs,
3225                         .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3226                         .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3227                         .hsw.has_vga = true,
3228                         .hsw.has_fuses = true,
3229                 },
3230         },
3231         {
3232                 .name = "DDI A/E IO power well",
3233                 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
3234                 .ops = &hsw_power_well_ops,
3235                 .id = DISP_PW_ID_NONE,
3236                 {
3237                         .hsw.regs = &hsw_power_well_regs,
3238                         .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
3239                 },
3240         },
3241         {
3242                 .name = "DDI B IO power well",
3243                 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3244                 .ops = &hsw_power_well_ops,
3245                 .id = DISP_PW_ID_NONE,
3246                 {
3247                         .hsw.regs = &hsw_power_well_regs,
3248                         .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3249                 },
3250         },
3251         {
3252                 .name = "DDI C IO power well",
3253                 .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3254                 .ops = &hsw_power_well_ops,
3255                 .id = DISP_PW_ID_NONE,
3256                 {
3257                         .hsw.regs = &hsw_power_well_regs,
3258                         .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3259                 },
3260         },
3261         {
3262                 .name = "DDI D IO power well",
3263                 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
3264                 .ops = &hsw_power_well_ops,
3265                 .id = DISP_PW_ID_NONE,
3266                 {
3267                         .hsw.regs = &hsw_power_well_regs,
3268                         .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3269                 },
3270         },
3271 };
3272
3273 static const struct i915_power_well_desc bxt_power_wells[] = {
3274         {
3275                 .name = "always-on",
3276                 .always_on = true,
3277                 .domains = POWER_DOMAIN_MASK,
3278                 .ops = &i9xx_always_on_power_well_ops,
3279                 .id = DISP_PW_ID_NONE,
3280         },
3281         {
3282                 .name = "power well 1",
3283                 /* Handled by the DMC firmware */
3284                 .always_on = true,
3285                 .domains = 0,
3286                 .ops = &hsw_power_well_ops,
3287                 .id = SKL_DISP_PW_1,
3288                 {
3289                         .hsw.regs = &hsw_power_well_regs,
3290                         .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3291                         .hsw.has_fuses = true,
3292                 },
3293         },
3294         {
3295                 .name = "DC off",
3296                 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
3297                 .ops = &gen9_dc_off_power_well_ops,
3298                 .id = SKL_DISP_DC_OFF,
3299         },
3300         {
3301                 .name = "power well 2",
3302                 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3303                 .ops = &hsw_power_well_ops,
3304                 .id = SKL_DISP_PW_2,
3305                 {
3306                         .hsw.regs = &hsw_power_well_regs,
3307                         .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3308                         .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3309                         .hsw.has_vga = true,
3310                         .hsw.has_fuses = true,
3311                 },
3312         },
3313         {
3314                 .name = "dpio-common-a",
3315                 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
3316                 .ops = &bxt_dpio_cmn_power_well_ops,
3317                 .id = BXT_DISP_PW_DPIO_CMN_A,
3318                 {
3319                         .bxt.phy = DPIO_PHY1,
3320                 },
3321         },
3322         {
3323                 .name = "dpio-common-bc",
3324                 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
3325                 .ops = &bxt_dpio_cmn_power_well_ops,
3326                 .id = VLV_DISP_PW_DPIO_CMN_BC,
3327                 {
3328                         .bxt.phy = DPIO_PHY0,
3329                 },
3330         },
3331 };
3332
3333 static const struct i915_power_well_desc glk_power_wells[] = {
3334         {
3335                 .name = "always-on",
3336                 .always_on = true,
3337                 .domains = POWER_DOMAIN_MASK,
3338                 .ops = &i9xx_always_on_power_well_ops,
3339                 .id = DISP_PW_ID_NONE,
3340         },
3341         {
3342                 .name = "power well 1",
3343                 /* Handled by the DMC firmware */
3344                 .always_on = true,
3345                 .domains = 0,
3346                 .ops = &hsw_power_well_ops,
3347                 .id = SKL_DISP_PW_1,
3348                 {
3349                         .hsw.regs = &hsw_power_well_regs,
3350                         .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3351                         .hsw.has_fuses = true,
3352                 },
3353         },
3354         {
3355                 .name = "DC off",
3356                 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
3357                 .ops = &gen9_dc_off_power_well_ops,
3358                 .id = SKL_DISP_DC_OFF,
3359         },
3360         {
3361                 .name = "power well 2",
3362                 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3363                 .ops = &hsw_power_well_ops,
3364                 .id = SKL_DISP_PW_2,
3365                 {
3366                         .hsw.regs = &hsw_power_well_regs,
3367                         .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3368                         .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3369                         .hsw.has_vga = true,
3370                         .hsw.has_fuses = true,
3371                 },
3372         },
3373         {
3374                 .name = "dpio-common-a",
3375                 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
3376                 .ops = &bxt_dpio_cmn_power_well_ops,
3377                 .id = BXT_DISP_PW_DPIO_CMN_A,
3378                 {
3379                         .bxt.phy = DPIO_PHY1,
3380                 },
3381         },
3382         {
3383                 .name = "dpio-common-b",
3384                 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
3385                 .ops = &bxt_dpio_cmn_power_well_ops,
3386                 .id = VLV_DISP_PW_DPIO_CMN_BC,
3387                 {
3388                         .bxt.phy = DPIO_PHY0,
3389                 },
3390         },
3391         {
3392                 .name = "dpio-common-c",
3393                 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
3394                 .ops = &bxt_dpio_cmn_power_well_ops,
3395                 .id = GLK_DISP_PW_DPIO_CMN_C,
3396                 {
3397                         .bxt.phy = DPIO_PHY2,
3398                 },
3399         },
3400         {
3401                 .name = "AUX A",
3402                 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
3403                 .ops = &hsw_power_well_ops,
3404                 .id = DISP_PW_ID_NONE,
3405                 {
3406                         .hsw.regs = &hsw_power_well_regs,
3407                         .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3408                 },
3409         },
3410         {
3411                 .name = "AUX B",
3412                 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
3413                 .ops = &hsw_power_well_ops,
3414                 .id = DISP_PW_ID_NONE,
3415                 {
3416                         .hsw.regs = &hsw_power_well_regs,
3417                         .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3418                 },
3419         },
3420         {
3421                 .name = "AUX C",
3422                 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
3423                 .ops = &hsw_power_well_ops,
3424                 .id = DISP_PW_ID_NONE,
3425                 {
3426                         .hsw.regs = &hsw_power_well_regs,
3427                         .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3428                 },
3429         },
3430         {
3431                 .name = "DDI A IO power well",
3432                 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
3433                 .ops = &hsw_power_well_ops,
3434                 .id = DISP_PW_ID_NONE,
3435                 {
3436                         .hsw.regs = &hsw_power_well_regs,
3437                         .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3438                 },
3439         },
3440         {
3441                 .name = "DDI B IO power well",
3442                 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3443                 .ops = &hsw_power_well_ops,
3444                 .id = DISP_PW_ID_NONE,
3445                 {
3446                         .hsw.regs = &hsw_power_well_regs,
3447                         .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3448                 },
3449         },
3450         {
3451                 .name = "DDI C IO power well",
3452                 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3453                 .ops = &hsw_power_well_ops,
3454                 .id = DISP_PW_ID_NONE,
3455                 {
3456                         .hsw.regs = &hsw_power_well_regs,
3457                         .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3458                 },
3459         },
3460 };
3461
3462 static const struct i915_power_well_desc cnl_power_wells[] = {
3463         {
3464                 .name = "always-on",
3465                 .always_on = true,
3466                 .domains = POWER_DOMAIN_MASK,
3467                 .ops = &i9xx_always_on_power_well_ops,
3468                 .id = DISP_PW_ID_NONE,
3469         },
3470         {
3471                 .name = "power well 1",
3472                 /* Handled by the DMC firmware */
3473                 .always_on = true,
3474                 .domains = 0,
3475                 .ops = &hsw_power_well_ops,
3476                 .id = SKL_DISP_PW_1,
3477                 {
3478                         .hsw.regs = &hsw_power_well_regs,
3479                         .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3480                         .hsw.has_fuses = true,
3481                 },
3482         },
3483         {
3484                 .name = "AUX A",
3485                 .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
3486                 .ops = &hsw_power_well_ops,
3487                 .id = DISP_PW_ID_NONE,
3488                 {
3489                         .hsw.regs = &hsw_power_well_regs,
3490                         .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3491                 },
3492         },
3493         {
3494                 .name = "AUX B",
3495                 .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
3496                 .ops = &hsw_power_well_ops,
3497                 .id = DISP_PW_ID_NONE,
3498                 {
3499                         .hsw.regs = &hsw_power_well_regs,
3500                         .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3501                 },
3502         },
3503         {
3504                 .name = "AUX C",
3505                 .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
3506                 .ops = &hsw_power_well_ops,
3507                 .id = DISP_PW_ID_NONE,
3508                 {
3509                         .hsw.regs = &hsw_power_well_regs,
3510                         .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3511                 },
3512         },
3513         {
3514                 .name = "AUX D",
3515                 .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
3516                 .ops = &hsw_power_well_ops,
3517                 .id = DISP_PW_ID_NONE,
3518                 {
3519                         .hsw.regs = &hsw_power_well_regs,
3520                         .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
3521                 },
3522         },
3523         {
3524                 .name = "DC off",
3525                 .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
3526                 .ops = &gen9_dc_off_power_well_ops,
3527                 .id = SKL_DISP_DC_OFF,
3528         },
3529         {
3530                 .name = "power well 2",
3531                 .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3532                 .ops = &hsw_power_well_ops,
3533                 .id = SKL_DISP_PW_2,
3534                 {
3535                         .hsw.regs = &hsw_power_well_regs,
3536                         .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3537                         .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3538                         .hsw.has_vga = true,
3539                         .hsw.has_fuses = true,
3540                 },
3541         },
3542         {
3543                 .name = "DDI A IO power well",
3544                 .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
3545                 .ops = &hsw_power_well_ops,
3546                 .id = DISP_PW_ID_NONE,
3547                 {
3548                         .hsw.regs = &hsw_power_well_regs,
3549                         .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3550                 },
3551         },
3552         {
3553                 .name = "DDI B IO power well",
3554                 .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
3555                 .ops = &hsw_power_well_ops,
3556                 .id = DISP_PW_ID_NONE,
3557                 {
3558                         .hsw.regs = &hsw_power_well_regs,
3559                         .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3560                 },
3561         },
3562         {
3563                 .name = "DDI C IO power well",
3564                 .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
3565                 .ops = &hsw_power_well_ops,
3566                 .id = DISP_PW_ID_NONE,
3567                 {
3568                         .hsw.regs = &hsw_power_well_regs,
3569                         .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3570                 },
3571         },
3572         {
3573                 .name = "DDI D IO power well",
3574                 .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
3575                 .ops = &hsw_power_well_ops,
3576                 .id = DISP_PW_ID_NONE,
3577                 {
3578                         .hsw.regs = &hsw_power_well_regs,
3579                         .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3580                 },
3581         },
3582         {
3583                 .name = "DDI F IO power well",
3584                 .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
3585                 .ops = &hsw_power_well_ops,
3586                 .id = DISP_PW_ID_NONE,
3587                 {
3588                         .hsw.regs = &hsw_power_well_regs,
3589                         .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
3590                 },
3591         },
3592         {
3593                 .name = "AUX F",
3594                 .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
3595                 .ops = &hsw_power_well_ops,
3596                 .id = DISP_PW_ID_NONE,
3597                 {
3598                         .hsw.regs = &hsw_power_well_regs,
3599                         .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
3600                 },
3601         },
3602 };
3603
3604 static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
3605         .sync_hw = hsw_power_well_sync_hw,
3606         .enable = icl_combo_phy_aux_power_well_enable,
3607         .disable = icl_combo_phy_aux_power_well_disable,
3608         .is_enabled = hsw_power_well_enabled,
3609 };
3610
3611 static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
3612         .sync_hw = hsw_power_well_sync_hw,
3613         .enable = icl_tc_phy_aux_power_well_enable,
3614         .disable = icl_tc_phy_aux_power_well_disable,
3615         .is_enabled = hsw_power_well_enabled,
3616 };
3617
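/*
 * On ICL+ the AUX and DDI IO power wells have their own sets of control
 * registers, separate from the main PWR_WELL_CTL registers used for the
 * larger pipe/transcoder wells.
 */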
3618 static const struct i915_power_well_regs icl_aux_power_well_regs = {
3619         .bios   = ICL_PWR_WELL_CTL_AUX1,
3620         .driver = ICL_PWR_WELL_CTL_AUX2,
3621         .debug  = ICL_PWR_WELL_CTL_AUX4,
3622 };
3623
3624 static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3625         .bios   = ICL_PWR_WELL_CTL_DDI1,
3626         .driver = ICL_PWR_WELL_CTL_DDI2,
3627         .debug  = ICL_PWR_WELL_CTL_DDI4,
3628 };
3629
3630 static const struct i915_power_well_desc icl_power_wells[] = {
3631         {
3632                 .name = "always-on",
3633                 .always_on = true,
3634                 .domains = POWER_DOMAIN_MASK,
3635                 .ops = &i9xx_always_on_power_well_ops,
3636                 .id = DISP_PW_ID_NONE,
3637         },
3638         {
3639                 .name = "power well 1",
3640                 /* Handled by the DMC firmware */
3641                 .always_on = true,
3642                 .domains = 0,
3643                 .ops = &hsw_power_well_ops,
3644                 .id = SKL_DISP_PW_1,
3645                 {
3646                         .hsw.regs = &hsw_power_well_regs,
3647                         .hsw.idx = ICL_PW_CTL_IDX_PW_1,
3648                         .hsw.has_fuses = true,
3649                 },
3650         },
3651         {
3652                 .name = "DC off",
3653                 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3654                 .ops = &gen9_dc_off_power_well_ops,
3655                 .id = SKL_DISP_DC_OFF,
3656         },
3657         {
3658                 .name = "power well 2",
3659                 .domains = ICL_PW_2_POWER_DOMAINS,
3660                 .ops = &hsw_power_well_ops,
3661                 .id = SKL_DISP_PW_2,
3662                 {
3663                         .hsw.regs = &hsw_power_well_regs,
3664                         .hsw.idx = ICL_PW_CTL_IDX_PW_2,
3665                         .hsw.has_fuses = true,
3666                 },
3667         },
3668         {
3669                 .name = "power well 3",
3670                 .domains = ICL_PW_3_POWER_DOMAINS,
3671                 .ops = &hsw_power_well_ops,
3672                 .id = ICL_DISP_PW_3,
3673                 {
3674                         .hsw.regs = &hsw_power_well_regs,
3675                         .hsw.idx = ICL_PW_CTL_IDX_PW_3,
3676                         .hsw.irq_pipe_mask = BIT(PIPE_B),
3677                         .hsw.has_vga = true,
3678                         .hsw.has_fuses = true,
3679                 },
3680         },
3681         {
3682                 .name = "DDI A IO",
3683                 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
3684                 .ops = &hsw_power_well_ops,
3685                 .id = DISP_PW_ID_NONE,
3686                 {
3687                         .hsw.regs = &icl_ddi_power_well_regs,
3688                         .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3689                 },
3690         },
3691         {
3692                 .name = "DDI B IO",
3693                 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
3694                 .ops = &hsw_power_well_ops,
3695                 .id = DISP_PW_ID_NONE,
3696                 {
3697                         .hsw.regs = &icl_ddi_power_well_regs,
3698                         .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3699                 },
3700         },
3701         {
3702                 .name = "DDI C IO",
3703                 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
3704                 .ops = &hsw_power_well_ops,
3705                 .id = DISP_PW_ID_NONE,
3706                 {
3707                         .hsw.regs = &icl_ddi_power_well_regs,
3708                         .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3709                 },
3710         },
3711         {
3712                 .name = "DDI D IO",
3713                 .domains = ICL_DDI_IO_D_POWER_DOMAINS,
3714                 .ops = &hsw_power_well_ops,
3715                 .id = DISP_PW_ID_NONE,
3716                 {
3717                         .hsw.regs = &icl_ddi_power_well_regs,
3718                         .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3719                 },
3720         },
3721         {
3722                 .name = "DDI E IO",
3723                 .domains = ICL_DDI_IO_E_POWER_DOMAINS,
3724                 .ops = &hsw_power_well_ops,
3725                 .id = DISP_PW_ID_NONE,
3726                 {
3727                         .hsw.regs = &icl_ddi_power_well_regs,
3728                         .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3729                 },
3730         },
3731         {
3732                 .name = "DDI F IO",
3733                 .domains = ICL_DDI_IO_F_POWER_DOMAINS,
3734                 .ops = &hsw_power_well_ops,
3735                 .id = DISP_PW_ID_NONE,
3736                 {
3737                         .hsw.regs = &icl_ddi_power_well_regs,
3738                         .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
3739                 },
3740         },
3741         {
3742                 .name = "AUX A",
3743                 .domains = ICL_AUX_A_IO_POWER_DOMAINS,
3744                 .ops = &icl_combo_phy_aux_power_well_ops,
3745                 .id = DISP_PW_ID_NONE,
3746                 {
3747                         .hsw.regs = &icl_aux_power_well_regs,
3748                         .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3749                 },
3750         },
3751         {
3752                 .name = "AUX B",
3753                 .domains = ICL_AUX_B_IO_POWER_DOMAINS,
3754                 .ops = &icl_combo_phy_aux_power_well_ops,
3755                 .id = DISP_PW_ID_NONE,
3756                 {
3757                         .hsw.regs = &icl_aux_power_well_regs,
3758                         .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3759                 },
3760         },
3761         {
3762                 .name = "AUX C TC1",
3763                 .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
3764                 .ops = &icl_tc_phy_aux_power_well_ops,
3765                 .id = DISP_PW_ID_NONE,
3766                 {
3767                         .hsw.regs = &icl_aux_power_well_regs,
3768                         .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3769                         .hsw.is_tc_tbt = false,
3770                 },
3771         },
3772         {
3773                 .name = "AUX D TC2",
3774                 .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
3775                 .ops = &icl_tc_phy_aux_power_well_ops,
3776                 .id = DISP_PW_ID_NONE,
3777                 {
3778                         .hsw.regs = &icl_aux_power_well_regs,
3779                         .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
3780                         .hsw.is_tc_tbt = false,
3781                 },
3782         },
3783         {
3784                 .name = "AUX E TC3",
3785                 .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
3786                 .ops = &icl_tc_phy_aux_power_well_ops,
3787                 .id = DISP_PW_ID_NONE,
3788                 {
3789                         .hsw.regs = &icl_aux_power_well_regs,
3790                         .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
3791                         .hsw.is_tc_tbt = false,
3792                 },
3793         },
3794         {
3795                 .name = "AUX F TC4",
3796                 .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
3797                 .ops = &icl_tc_phy_aux_power_well_ops,
3798                 .id = DISP_PW_ID_NONE,
3799                 {
3800                         .hsw.regs = &icl_aux_power_well_regs,
3801                         .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
3802                         .hsw.is_tc_tbt = false,
3803                 },
3804         },
3805         {
3806                 .name = "AUX C TBT1",
3807                 .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
3808                 .ops = &icl_tc_phy_aux_power_well_ops,
3809                 .id = DISP_PW_ID_NONE,
3810                 {
3811                         .hsw.regs = &icl_aux_power_well_regs,
3812                         .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
3813                         .hsw.is_tc_tbt = true,
3814                 },
3815         },
3816         {
3817                 .name = "AUX D TBT2",
3818                 .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
3819                 .ops = &icl_tc_phy_aux_power_well_ops,
3820                 .id = DISP_PW_ID_NONE,
3821                 {
3822                         .hsw.regs = &icl_aux_power_well_regs,
3823                         .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
3824                         .hsw.is_tc_tbt = true,
3825                 },
3826         },
3827         {
3828                 .name = "AUX E TBT3",
3829                 .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
3830                 .ops = &icl_tc_phy_aux_power_well_ops,
3831                 .id = DISP_PW_ID_NONE,
3832                 {
3833                         .hsw.regs = &icl_aux_power_well_regs,
3834                         .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
3835                         .hsw.is_tc_tbt = true,
3836                 },
3837         },
3838         {
3839                 .name = "AUX F TBT4",
3840                 .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
3841                 .ops = &icl_tc_phy_aux_power_well_ops,
3842                 .id = DISP_PW_ID_NONE,
3843                 {
3844                         .hsw.regs = &icl_aux_power_well_regs,
3845                         .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
3846                         .hsw.is_tc_tbt = true,
3847                 },
3848         },
3849         {
3850                 .name = "power well 4",
3851                 .domains = ICL_PW_4_POWER_DOMAINS,
3852                 .ops = &hsw_power_well_ops,
3853                 .id = DISP_PW_ID_NONE,
3854                 {
3855                         .hsw.regs = &hsw_power_well_regs,
3856                         .hsw.idx = ICL_PW_CTL_IDX_PW_4,
3857                         .hsw.has_fuses = true,
3858                         .hsw.irq_pipe_mask = BIT(PIPE_C),
3859                 },
3860         },
3861 };
3862
3863 static const struct i915_power_well_desc ehl_power_wells[] = {
3864         {
3865                 .name = "always-on",
3866                 .always_on = true,
3867                 .domains = POWER_DOMAIN_MASK,
3868                 .ops = &i9xx_always_on_power_well_ops,
3869                 .id = DISP_PW_ID_NONE,
3870         },
3871         {
3872                 .name = "power well 1",
3873                 /* Handled by the DMC firmware */
3874                 .always_on = true,
3875                 .domains = 0,
3876                 .ops = &hsw_power_well_ops,
3877                 .id = SKL_DISP_PW_1,
3878                 {
3879                         .hsw.regs = &hsw_power_well_regs,
3880                         .hsw.idx = ICL_PW_CTL_IDX_PW_1,
3881                         .hsw.has_fuses = true,
3882                 },
3883         },
3884         {
3885                 .name = "DC off",
3886                 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3887                 .ops = &gen9_dc_off_power_well_ops,
3888                 .id = SKL_DISP_DC_OFF,
3889         },
3890         {
3891                 .name = "power well 2",
3892                 .domains = ICL_PW_2_POWER_DOMAINS,
3893                 .ops = &hsw_power_well_ops,
3894                 .id = SKL_DISP_PW_2,
3895                 {
3896                         .hsw.regs = &hsw_power_well_regs,
3897                         .hsw.idx = ICL_PW_CTL_IDX_PW_2,
3898                         .hsw.has_fuses = true,
3899                 },
3900         },
3901         {
3902                 .name = "power well 3",
3903                 .domains = ICL_PW_3_POWER_DOMAINS,
3904                 .ops = &hsw_power_well_ops,
3905                 .id = ICL_DISP_PW_3,
3906                 {
3907                         .hsw.regs = &hsw_power_well_regs,
3908                         .hsw.idx = ICL_PW_CTL_IDX_PW_3,
3909                         .hsw.irq_pipe_mask = BIT(PIPE_B),
3910                         .hsw.has_vga = true,
3911                         .hsw.has_fuses = true,
3912                 },
3913         },
3914         {
3915                 .name = "DDI A IO",
3916                 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
3917                 .ops = &hsw_power_well_ops,
3918                 .id = DISP_PW_ID_NONE,
3919                 {
3920                         .hsw.regs = &icl_ddi_power_well_regs,
3921                         .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3922                 },
3923         },
3924         {
3925                 .name = "DDI B IO",
3926                 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
3927                 .ops = &hsw_power_well_ops,
3928                 .id = DISP_PW_ID_NONE,
3929                 {
3930                         .hsw.regs = &icl_ddi_power_well_regs,
3931                         .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3932                 },
3933         },
3934         {
3935                 .name = "DDI C IO",
3936                 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
3937                 .ops = &hsw_power_well_ops,
3938                 .id = DISP_PW_ID_NONE,
3939                 {
3940                         .hsw.regs = &icl_ddi_power_well_regs,
3941                         .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3942                 },
3943         },
3944         {
3945                 .name = "DDI D IO",
3946                 .domains = ICL_DDI_IO_D_POWER_DOMAINS,
3947                 .ops = &hsw_power_well_ops,
3948                 .id = DISP_PW_ID_NONE,
3949                 {
3950                         .hsw.regs = &icl_ddi_power_well_regs,
3951                         .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3952                 },
3953         },
3954         {
3955                 .name = "AUX A",
3956                 .domains = ICL_AUX_A_IO_POWER_DOMAINS,
3957                 .ops = &hsw_power_well_ops,
3958                 .id = DISP_PW_ID_NONE,
3959                 {
3960                         .hsw.regs = &icl_aux_power_well_regs,
3961                         .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3962                 },
3963         },
3964         {
3965                 .name = "AUX B",
3966                 .domains = ICL_AUX_B_IO_POWER_DOMAINS,
3967                 .ops = &hsw_power_well_ops,
3968                 .id = DISP_PW_ID_NONE,
3969                 {
3970                         .hsw.regs = &icl_aux_power_well_regs,
3971                         .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3972                 },
3973         },
3974         {
3975                 .name = "AUX C",
3976                 .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
3977                 .ops = &hsw_power_well_ops,
3978                 .id = DISP_PW_ID_NONE,
3979                 {
3980                         .hsw.regs = &icl_aux_power_well_regs,
3981                         .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3982                 },
3983         },
3984         {
3985                 .name = "AUX D",
3986                 .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
3987                 .ops = &hsw_power_well_ops,
3988                 .id = DISP_PW_ID_NONE,
3989                 {
3990                         .hsw.regs = &icl_aux_power_well_regs,
3991                         .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
3992                 },
3993         },
3994         {
3995                 .name = "power well 4",
3996                 .domains = ICL_PW_4_POWER_DOMAINS,
3997                 .ops = &hsw_power_well_ops,
3998                 .id = DISP_PW_ID_NONE,
3999                 {
4000                         .hsw.regs = &hsw_power_well_regs,
4001                         .hsw.idx = ICL_PW_CTL_IDX_PW_4,
4002                         .hsw.has_fuses = true,
4003                         .hsw.irq_pipe_mask = BIT(PIPE_C),
4004                 },
4005         },
4006 };
4007
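/*
 * Ask PCODE to block or unblock TC-cold: retry the mailbox request up to
 * three times, sleeping 1 ms between attempts on -EAGAIN, and treat a
 * reported exit failure on a block request as -EIO.
 */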
4008 static void
4009 tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
4010 {
4011         u8 tries = 0;
4012         int ret;
4013
4014         while (1) {
4015                 u32 low_val = 0, high_val;
4016
4017                 if (block)
4018                         high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_BLOCK_REQ;
4019                 else
4020                         high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ;
4021
4022                 /*
4023                  * The spec states that we should time out the request after
4024                  * 200us, but the function below will time out after 500us.
4025                  */
4026                 ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val,
4027                                              &high_val);
4028                 if (ret == 0) {
4029                         if (block &&
4030                             (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
4031                                 ret = -EIO;
4032                         else
4033                                 break;
4034                 }
4035
4036                 if (++tries == 3)
4037                         break;
4038
4039                 if (ret == -EAGAIN)
4040                         msleep(1);
4041         }
4042
4043         if (ret)
4044                 drm_err(&i915->drm, "TC cold %sblock failed\n",
4045                         block ? "" : "un");
4046         else
4047                 drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
4048                             block ? "" : "un");
4049 }
4050
4051 static void
4052 tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
4053                                   struct i915_power_well *power_well)
4054 {
4055         tgl_tc_cold_request(i915, true);
4056 }
4057
4058 static void
4059 tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
4060                                    struct i915_power_well *power_well)
4061 {
4062         tgl_tc_cold_request(i915, false);
4063 }
4064
4065 static void
4066 tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
4067                                    struct i915_power_well *power_well)
4068 {
4069         if (power_well->count > 0)
4070                 tgl_tc_cold_off_power_well_enable(i915, power_well);
4071         else
4072                 tgl_tc_cold_off_power_well_disable(i915, power_well);
4073 }
4074
4075 static bool
4076 tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
4077                                       struct i915_power_well *power_well)
4078 {
4079         /*
4080          * Not the correct implementation, but there is no way to just read it
4081          * back from PCODE, so return the count to avoid state mismatch errors.
4082          */
4083         return power_well->count;
4084 }
4085
4086 static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
4087         .sync_hw = tgl_tc_cold_off_power_well_sync_hw,
4088         .enable = tgl_tc_cold_off_power_well_enable,
4089         .disable = tgl_tc_cold_off_power_well_disable,
4090         .is_enabled = tgl_tc_cold_off_power_well_is_enabled,
4091 };
4092
4093 static const struct i915_power_well_desc tgl_power_wells[] = {
4094         {
4095                 .name = "always-on",
4096                 .always_on = true,
4097                 .domains = POWER_DOMAIN_MASK,
4098                 .ops = &i9xx_always_on_power_well_ops,
4099                 .id = DISP_PW_ID_NONE,
4100         },
4101         {
4102                 .name = "power well 1",
4103                 /* Handled by the DMC firmware */
4104                 .always_on = true,
4105                 .domains = 0,
4106                 .ops = &hsw_power_well_ops,
4107                 .id = SKL_DISP_PW_1,
4108                 {
4109                         .hsw.regs = &hsw_power_well_regs,
4110                         .hsw.idx = ICL_PW_CTL_IDX_PW_1,
4111                         .hsw.has_fuses = true,
4112                 },
4113         },
4114         {
4115                 .name = "DC off",
4116                 .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
4117                 .ops = &gen9_dc_off_power_well_ops,
4118                 .id = SKL_DISP_DC_OFF,
4119         },
4120         {
4121                 .name = "power well 2",
4122                 .domains = TGL_PW_2_POWER_DOMAINS,
4123                 .ops = &hsw_power_well_ops,
4124                 .id = SKL_DISP_PW_2,
4125                 {
4126                         .hsw.regs = &hsw_power_well_regs,
4127                         .hsw.idx = ICL_PW_CTL_IDX_PW_2,
4128                         .hsw.has_fuses = true,
4129                 },
4130         },
4131         {
4132                 .name = "power well 3",
4133                 .domains = TGL_PW_3_POWER_DOMAINS,
4134                 .ops = &hsw_power_well_ops,
4135                 .id = ICL_DISP_PW_3,
4136                 {
4137                         .hsw.regs = &hsw_power_well_regs,
4138                         .hsw.idx = ICL_PW_CTL_IDX_PW_3,
4139                         .hsw.irq_pipe_mask = BIT(PIPE_B),
4140                         .hsw.has_vga = true,
4141                         .hsw.has_fuses = true,
4142                 },
4143         },
4144         {
4145                 .name = "DDI A IO",
4146                 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
4147                 .ops = &hsw_power_well_ops,
4148                 .id = DISP_PW_ID_NONE,
4149                 {
4150                         .hsw.regs = &icl_ddi_power_well_regs,
4151                         .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4152                 }
4153         },
4154         {
4155                 .name = "DDI B IO",
4156                 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
4157                 .ops = &hsw_power_well_ops,
4158                 .id = DISP_PW_ID_NONE,
4159                 {
4160                         .hsw.regs = &icl_ddi_power_well_regs,
4161                         .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4162                 }
4163         },
4164         {
4165                 .name = "DDI C IO",
4166                 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
4167                 .ops = &hsw_power_well_ops,
4168                 .id = DISP_PW_ID_NONE,
4169                 {
4170                         .hsw.regs = &icl_ddi_power_well_regs,
4171                         .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
4172                 }
4173         },
4174         {
4175                 .name = "DDI D TC1 IO",
4176                 .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
4177                 .ops = &hsw_power_well_ops,
4178                 .id = DISP_PW_ID_NONE,
4179                 {
4180                         .hsw.regs = &icl_ddi_power_well_regs,
4181                         .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4182                 },
4183         },
4184         {
4185                 .name = "DDI E TC2 IO",
4186                 .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
4187                 .ops = &hsw_power_well_ops,
4188                 .id = DISP_PW_ID_NONE,
4189                 {
4190                         .hsw.regs = &icl_ddi_power_well_regs,
4191                         .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4192                 },
4193         },
4194         {
4195                 .name = "DDI F TC3 IO",
4196                 .domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS,
4197                 .ops = &hsw_power_well_ops,
4198                 .id = DISP_PW_ID_NONE,
4199                 {
4200                         .hsw.regs = &icl_ddi_power_well_regs,
4201                         .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
4202                 },
4203         },
4204         {
4205                 .name = "DDI G TC4 IO",
4206                 .domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS,
4207                 .ops = &hsw_power_well_ops,
4208                 .id = DISP_PW_ID_NONE,
4209                 {
4210                         .hsw.regs = &icl_ddi_power_well_regs,
4211                         .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
4212                 },
4213         },
4214         {
4215                 .name = "DDI H TC5 IO",
4216                 .domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS,
4217                 .ops = &hsw_power_well_ops,
4218                 .id = DISP_PW_ID_NONE,
4219                 {
4220                         .hsw.regs = &icl_ddi_power_well_regs,
4221                         .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
4222                 },
4223         },
4224         {
4225                 .name = "DDI I TC6 IO",
4226                 .domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS,
4227                 .ops = &hsw_power_well_ops,
4228                 .id = DISP_PW_ID_NONE,
4229                 {
4230                         .hsw.regs = &icl_ddi_power_well_regs,
4231                         .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
4232                 },
4233         },
4234         {
4235                 .name = "AUX A",
4236                 .domains = TGL_AUX_A_IO_POWER_DOMAINS,
4237                 .ops = &hsw_power_well_ops,
4238                 .id = DISP_PW_ID_NONE,
4239                 {
4240                         .hsw.regs = &icl_aux_power_well_regs,
4241                         .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4242                 },
4243         },
4244         {
4245                 .name = "AUX B",
4246                 .domains = TGL_AUX_B_IO_POWER_DOMAINS,
4247                 .ops = &hsw_power_well_ops,
4248                 .id = DISP_PW_ID_NONE,
4249                 {
4250                         .hsw.regs = &icl_aux_power_well_regs,
4251                         .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4252                 },
4253         },
4254         {
4255                 .name = "AUX C",
4256                 .domains = TGL_AUX_C_IO_POWER_DOMAINS,
4257                 .ops = &hsw_power_well_ops,
4258                 .id = DISP_PW_ID_NONE,
4259                 {
4260                         .hsw.regs = &icl_aux_power_well_regs,
4261                         .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
4262                 },
4263         },
4264         {
4265                 .name = "AUX D TC1",
4266                 .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
4267                 .ops = &icl_tc_phy_aux_power_well_ops,
4268                 .id = DISP_PW_ID_NONE,
4269                 {
4270                         .hsw.regs = &icl_aux_power_well_regs,
4271                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4272                         .hsw.is_tc_tbt = false,
4273                 },
4274         },
4275         {
4276                 .name = "AUX E TC2",
4277                 .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
4278                 .ops = &icl_tc_phy_aux_power_well_ops,
4279                 .id = DISP_PW_ID_NONE,
4280                 {
4281                         .hsw.regs = &icl_aux_power_well_regs,
4282                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4283                         .hsw.is_tc_tbt = false,
4284                 },
4285         },
4286         {
4287                 .name = "AUX F TC3",
4288                 .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
4289                 .ops = &icl_tc_phy_aux_power_well_ops,
4290                 .id = DISP_PW_ID_NONE,
4291                 {
4292                         .hsw.regs = &icl_aux_power_well_regs,
4293                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
4294                         .hsw.is_tc_tbt = false,
4295                 },
4296         },
4297         {
4298                 .name = "AUX G TC4",
4299                 .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
4300                 .ops = &icl_tc_phy_aux_power_well_ops,
4301                 .id = DISP_PW_ID_NONE,
4302                 {
4303                         .hsw.regs = &icl_aux_power_well_regs,
4304                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
4305                         .hsw.is_tc_tbt = false,
4306                 },
4307         },
4308         {
4309                 .name = "AUX H TC5",
4310                 .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
4311                 .ops = &icl_tc_phy_aux_power_well_ops,
4312                 .id = DISP_PW_ID_NONE,
4313                 {
4314                         .hsw.regs = &icl_aux_power_well_regs,
4315                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
4316                         .hsw.is_tc_tbt = false,
4317                 },
4318         },
4319         {
4320                 .name = "AUX I TC6",
4321                 .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
4322                 .ops = &icl_tc_phy_aux_power_well_ops,
4323                 .id = DISP_PW_ID_NONE,
4324                 {
4325                         .hsw.regs = &icl_aux_power_well_regs,
4326                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
4327                         .hsw.is_tc_tbt = false,
4328                 },
4329         },
4330         {
4331                 .name = "AUX D TBT1",
4332                 .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
4333                 .ops = &icl_tc_phy_aux_power_well_ops,
4334                 .id = DISP_PW_ID_NONE,
4335                 {
4336                         .hsw.regs = &icl_aux_power_well_regs,
4337                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
4338                         .hsw.is_tc_tbt = true,
4339                 },
4340         },
4341         {
4342                 .name = "AUX E TBT2",
4343                 .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
4344                 .ops = &icl_tc_phy_aux_power_well_ops,
4345                 .id = DISP_PW_ID_NONE,
4346                 {
4347                         .hsw.regs = &icl_aux_power_well_regs,
4348                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
4349                         .hsw.is_tc_tbt = true,
4350                 },
4351         },
4352         {
4353                 .name = "AUX F TBT3",
4354                 .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
4355                 .ops = &icl_tc_phy_aux_power_well_ops,
4356                 .id = DISP_PW_ID_NONE,
4357                 {
4358                         .hsw.regs = &icl_aux_power_well_regs,
4359                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
4360                         .hsw.is_tc_tbt = true,
4361                 },
4362         },
4363         {
4364                 .name = "AUX G TBT4",
4365                 .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
4366                 .ops = &icl_tc_phy_aux_power_well_ops,
4367                 .id = DISP_PW_ID_NONE,
4368                 {
4369                         .hsw.regs = &icl_aux_power_well_regs,
4370                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
4371                         .hsw.is_tc_tbt = true,
4372                 },
4373         },
4374         {
4375                 .name = "AUX H TBT5",
4376                 .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
4377                 .ops = &icl_tc_phy_aux_power_well_ops,
4378                 .id = DISP_PW_ID_NONE,
4379                 {
4380                         .hsw.regs = &icl_aux_power_well_regs,
4381                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
4382                         .hsw.is_tc_tbt = true,
4383                 },
4384         },
4385         {
4386                 .name = "AUX I TBT6",
4387                 .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
4388                 .ops = &icl_tc_phy_aux_power_well_ops,
4389                 .id = DISP_PW_ID_NONE,
4390                 {
4391                         .hsw.regs = &icl_aux_power_well_regs,
4392                         .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
4393                         .hsw.is_tc_tbt = true,
4394                 },
4395         },
4396         {
4397                 .name = "power well 4",
4398                 .domains = TGL_PW_4_POWER_DOMAINS,
4399                 .ops = &hsw_power_well_ops,
4400                 .id = DISP_PW_ID_NONE,
4401                 {
4402                         .hsw.regs = &hsw_power_well_regs,
4403                         .hsw.idx = ICL_PW_CTL_IDX_PW_4,
4404                         .hsw.has_fuses = true,
4405                         .hsw.irq_pipe_mask = BIT(PIPE_C),
4406                 }
4407         },
4408         {
4409                 .name = "power well 5",
4410                 .domains = TGL_PW_5_POWER_DOMAINS,
4411                 .ops = &hsw_power_well_ops,
4412                 .id = DISP_PW_ID_NONE,
4413                 {
4414                         .hsw.regs = &hsw_power_well_regs,
4415                         .hsw.idx = TGL_PW_CTL_IDX_PW_5,
4416                         .hsw.has_fuses = true,
4417                         .hsw.irq_pipe_mask = BIT(PIPE_D),
4418                 },
4419         },
4420         {
4421                 .name = "TC cold off",
4422                 .domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
4423                 .ops = &tgl_tc_cold_off_ops,
4424                 .id = DISP_PW_ID_NONE,
4425         },
4426 };
4427
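/*
 * Normalize the disable_power_well modparam: non-negative values are
 * clamped to 0/1, anything else (auto) defaults to 1.
 */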
4428 static int
4429 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
4430                                    int disable_power_well)
4431 {
4432         if (disable_power_well >= 0)
4433                 return !!disable_power_well;
4434
4435         return 1;
4436 }
4437
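/*
 * Compute the mask of DC states the driver is allowed to use, based on the
 * deepest DC state supported by the platform and the enable_dc modparam.
 * DC9 is added separately on the platforms that support it.
 */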
4438 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
4439                                int enable_dc)
4440 {
4441         u32 mask;
4442         int requested_dc;
4443         int max_dc;
4444
4445         if (INTEL_GEN(dev_priv) >= 12) {
4446                 max_dc = 4;
4447                 /*
4448                  * DC9 has a separate HW flow from the rest of the DC states,
4449                  * not depending on the DMC firmware. It's needed by system
4450                  * suspend/resume, so allow it unconditionally.
4451                  */
4452                 mask = DC_STATE_EN_DC9;
4453         } else if (IS_GEN(dev_priv, 11)) {
4454                 max_dc = 2;
4455                 mask = DC_STATE_EN_DC9;
4456         } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
4457                 max_dc = 2;
4458                 mask = 0;
4459         } else if (IS_GEN9_LP(dev_priv)) {
4460                 max_dc = 1;
4461                 mask = DC_STATE_EN_DC9;
4462         } else {
4463                 max_dc = 0;
4464                 mask = 0;
4465         }
4466
4467         if (!i915_modparams.disable_power_well)
4468                 max_dc = 0;
4469
4470         if (enable_dc >= 0 && enable_dc <= max_dc) {
4471                 requested_dc = enable_dc;
4472         } else if (enable_dc == -1) {
4473                 requested_dc = max_dc;
4474         } else if (enable_dc > max_dc && enable_dc <= 4) {
4475                 drm_dbg_kms(&dev_priv->drm,
4476                             "Adjusting requested max DC state (%d->%d)\n",
4477                             enable_dc, max_dc);
4478                 requested_dc = max_dc;
4479         } else {
4480                 drm_err(&dev_priv->drm,
4481                         "Unexpected value for enable_dc (%d)\n", enable_dc);
4482                 requested_dc = max_dc;
4483         }
4484
4485         switch (requested_dc) {
4486         case 4:
4487                 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
4488                 break;
4489         case 3:
4490                 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
4491                 break;
4492         case 2:
4493                 mask |= DC_STATE_EN_UPTO_DC6;
4494                 break;
4495         case 1:
4496                 mask |= DC_STATE_EN_UPTO_DC5;
4497                 break;
4498         }
4499
4500         drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
4501
4502         return mask;
4503 }
4504
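/*
 * Allocate the power well array for the given descriptors and sanity check
 * that each explicit power well ID is used at most once and fits into the
 * 64 bit ID space.
 */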
4505 static int
4506 __set_power_wells(struct i915_power_domains *power_domains,
4507                   const struct i915_power_well_desc *power_well_descs,
4508                   int power_well_count)
4509 {
4510         u64 power_well_ids = 0;
4511         int i;
4512
4513         power_domains->power_well_count = power_well_count;
4514         power_domains->power_wells =
4515                                 kcalloc(power_well_count,
4516                                         sizeof(*power_domains->power_wells),
4517                                         GFP_KERNEL);
4518         if (!power_domains->power_wells)
4519                 return -ENOMEM;
4520
4521         for (i = 0; i < power_well_count; i++) {
4522                 enum i915_power_well_id id = power_well_descs[i].id;
4523
4524                 power_domains->power_wells[i].desc = &power_well_descs[i];
4525
4526                 if (id == DISP_PW_ID_NONE)
4527                         continue;
4528
4529                 WARN_ON(id >= sizeof(power_well_ids) * 8);
4530                 WARN_ON(power_well_ids & BIT_ULL(id));
4531                 power_well_ids |= BIT_ULL(id);
4532         }
4533
4534         return 0;
4535 }
4536
4537 #define set_power_wells(power_domains, __power_well_descs) \
4538         __set_power_wells(power_domains, __power_well_descs, \
4539                           ARRAY_SIZE(__power_well_descs))
4540
4541 /**
4542  * intel_power_domains_init - initializes the power domain structures
4543  * @dev_priv: i915 device instance
4544  *
4545  * Initializes the power domain structures for @dev_priv depending upon the
4546  * supported platform.
4547  */
4548 int intel_power_domains_init(struct drm_i915_private *dev_priv)
4549 {
4550         struct i915_power_domains *power_domains = &dev_priv->power_domains;
4551         int err;
4552
4553         i915_modparams.disable_power_well =
4554                 sanitize_disable_power_well_option(dev_priv,
4555                                                    i915_modparams.disable_power_well);
4556         dev_priv->csr.allowed_dc_mask =
4557                 get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
4558
4559         dev_priv->csr.target_dc_state =
4560                 sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
4561
4562         BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
4563
4564         mutex_init(&power_domains->lock);
4565
4566         INIT_DELAYED_WORK(&power_domains->async_put_work,
4567                           intel_display_power_put_async_work);
4568
4569         /*
4570          * The enabling order will be from lower to higher indexed wells;
4571          * the disabling order is the reverse.
4572          */
4573         if (IS_GEN(dev_priv, 12)) {
4574                 err = set_power_wells(power_domains, tgl_power_wells);
4575         } else if (IS_ELKHARTLAKE(dev_priv)) {
4576                 err = set_power_wells(power_domains, ehl_power_wells);
4577         } else if (IS_GEN(dev_priv, 11)) {
4578                 err = set_power_wells(power_domains, icl_power_wells);
4579         } else if (IS_CANNONLAKE(dev_priv)) {
4580                 err = set_power_wells(power_domains, cnl_power_wells);
4581
4582                 /*
4583                  * The DDI and AUX IO power wells get enabled for all ports
4584                  * regardless of their presence or use. So, in order to avoid
4585                  * timeouts, let's remove them from the list
4586                  * for the SKUs without port F.
4587                  */
4588                 if (!IS_CNL_WITH_PORT_F(dev_priv))
4589                         power_domains->power_well_count -= 2;
4590         } else if (IS_GEMINILAKE(dev_priv)) {
4591                 err = set_power_wells(power_domains, glk_power_wells);
4592         } else if (IS_BROXTON(dev_priv)) {
4593                 err = set_power_wells(power_domains, bxt_power_wells);
4594         } else if (IS_GEN9_BC(dev_priv)) {
4595                 err = set_power_wells(power_domains, skl_power_wells);
4596         } else if (IS_CHERRYVIEW(dev_priv)) {
4597                 err = set_power_wells(power_domains, chv_power_wells);
4598         } else if (IS_BROADWELL(dev_priv)) {
4599                 err = set_power_wells(power_domains, bdw_power_wells);
4600         } else if (IS_HASWELL(dev_priv)) {
4601                 err = set_power_wells(power_domains, hsw_power_wells);
4602         } else if (IS_VALLEYVIEW(dev_priv)) {
4603                 err = set_power_wells(power_domains, vlv_power_wells);
4604         } else if (IS_I830(dev_priv)) {
4605                 err = set_power_wells(power_domains, i830_power_wells);
4606         } else {
4607                 err = set_power_wells(power_domains, i9xx_always_on_power_well);
4608         }
4609
4610         return err;
4611 }
4612
4613 /**
4614  * intel_power_domains_cleanup - clean up power domains resources
4615  * @dev_priv: i915 device instance
4616  *
4617  * Release any resources acquired by intel_power_domains_init()
4618  */
4619 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
4620 {
4621         kfree(dev_priv->power_domains.power_wells);
4622 }
4623
4624 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
4625 {
4626         struct i915_power_domains *power_domains = &dev_priv->power_domains;
4627         struct i915_power_well *power_well;
4628
4629         mutex_lock(&power_domains->lock);
4630         for_each_power_well(dev_priv, power_well) {
4631                 power_well->desc->ops->sync_hw(dev_priv, power_well);
4632                 power_well->hw_enabled =
4633                         power_well->desc->ops->is_enabled(dev_priv, power_well);
4634         }
4635         mutex_unlock(&power_domains->lock);
4636 }
4637
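/*
 * Set or clear the power request for a single DBUF slice and check that the
 * power state follows after a 10 us delay; returns false on timeout.
 */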
4638 static inline
4639 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
4640                           i915_reg_t reg, bool enable)
4641 {
4642         u32 val, status;
4643
4644         val = intel_de_read(dev_priv, reg);
4645         val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
4646         intel_de_write(dev_priv, reg, val);
4647         intel_de_posting_read(dev_priv, reg);
4648         udelay(10);
4649
4650         status = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
4651         if ((enable && !status) || (!enable && status)) {
4652                 drm_err(&dev_priv->drm, "DBus power %s timeout!\n",
4653                         enable ? "enable" : "disable");
4654                 return false;
4655         }
4656         return true;
4657 }
4658
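/* These helpers toggle only DBUF slice S1, the sole slice on pre-gen11 platforms. */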
4659 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
4660 {
4661         icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1));
4662 }
4663
4664 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
4665 {
4666         icl_dbuf_slices_update(dev_priv, 0);
4667 }
4668
4669 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
4670                             u8 req_slices)
4671 {
4672         int i;
4673         int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
4674         struct i915_power_domains *power_domains = &dev_priv->power_domains;
4675
4676         drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices,
4677                  "Invalid number of dbuf slices requested\n");
4678
4679         drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
4680                     req_slices);
4681
4682         /*
4683          * This might run in parallel with gen9_dc_off_power_well_enable
4684          * being called from intel_dp_detect, for instance, which can
4685          * trigger an assertion due to a race condition:
4686          * gen9_assert_dbuf_enabled might preempt this after the registers
4687          * were already updated but before dev_priv was.
4688          */
4689         mutex_lock(&power_domains->lock);
4690
4691         for (i = 0; i < max_slices; i++) {
4692                 intel_dbuf_slice_set(dev_priv,
4693                                      DBUF_CTL_S(i),
4694                                      (req_slices & BIT(i)) != 0);
4695         }
4696
4697         dev_priv->enabled_dbuf_slices_mask = req_slices;
4698
4699         mutex_unlock(&power_domains->lock);
4700 }
4701
4702 static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
4703 {
4704         skl_ddb_get_hw_state(dev_priv);
4705         /*
4706          * Just power up at least one slice; we will
4707          * figure out later which slices we have and which we need.
4708          */
4709         icl_dbuf_slices_update(dev_priv, dev_priv->enabled_dbuf_slices_mask |
4710                                BIT(DBUF_S1));
4711 }
4712
4713 static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
4714 {
4715         icl_dbuf_slices_update(dev_priv, 0);
4716 }
4717
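/*
 * Program the MBus ABOX credit configuration; gen12 has two additional ABOX
 * instances that need the same values.
 */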
4718 static void icl_mbus_init(struct drm_i915_private *dev_priv)
4719 {
4720         u32 mask, val;
4721
4722         mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
4723                 MBUS_ABOX_BT_CREDIT_POOL2_MASK |
4724                 MBUS_ABOX_B_CREDIT_MASK |
4725                 MBUS_ABOX_BW_CREDIT_MASK;
4726         val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
4727                 MBUS_ABOX_BT_CREDIT_POOL2(16) |
4728                 MBUS_ABOX_B_CREDIT(1) |
4729                 MBUS_ABOX_BW_CREDIT(1);
4730
4731         intel_de_rmw(dev_priv, MBUS_ABOX_CTL, mask, val);
4732         if (INTEL_GEN(dev_priv) >= 12) {
4733                 intel_de_rmw(dev_priv, MBUS_ABOX1_CTL, mask, val);
4734                 intel_de_rmw(dev_priv, MBUS_ABOX2_CTL, mask, val);
4735         }
4736 }
4737
4738 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
4739 {
4740         u32 val = intel_de_read(dev_priv, LCPLL_CTL);
4741
4742         /*
4743          * The LCPLL register should be turned on by the BIOS. For now
4744          * let's just check its state and print errors in case
4745          * something is wrong.  Don't even try to turn it on.
4746          */
4747
4748         if (val & LCPLL_CD_SOURCE_FCLK)
4749                 drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
4750
4751         if (val & LCPLL_PLL_DISABLE)
4752                 drm_err(&dev_priv->drm, "LCPLL is disabled\n");
4753
4754         if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
4755                 drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
4756 }
4757
4758 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
4759 {
4760         struct drm_device *dev = &dev_priv->drm;
4761         struct intel_crtc *crtc;
4762
4763         for_each_intel_crtc(dev, crtc)
4764                 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
4765                                 pipe_name(crtc->pipe));
4766
4767         I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
4768                         "Display power well on\n");
4769         I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
4770                         "SPLL enabled\n");
4771         I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
4772                         "WRPLL1 enabled\n");
4773         I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
4774                         "WRPLL2 enabled\n");
4775         I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
4776                         "Panel power on\n");
4777         I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
4778                         "CPU PWM1 enabled\n");
4779         if (IS_HASWELL(dev_priv))
4780                 I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
4781                                 "CPU PWM2 enabled\n");
4782         I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
4783                         "PCH PWM1 enabled\n");
4784         I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
4785                         "Utility pin enabled\n");
4786         I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
4787                         "PCH GTC enabled\n");
4788
4789         /*
4790          * In theory we can still leave IRQs enabled, as long as only the HPD
4791          * interrupts remain enabled. We used to check for that, but since it's
4792          * gen-specific and since we only disable LCPLL after we fully disable
4793          * the interrupts, the check below should be enough.
4794          */
4795         I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
4796 }
4797
4798 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
4799 {
4800         if (IS_HASWELL(dev_priv))
4801                 return intel_de_read(dev_priv, D_COMP_HSW);
4802         else
4803                 return intel_de_read(dev_priv, D_COMP_BDW);
4804 }
4805
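/*
 * D_COMP writes go through the pcode mailbox on HSW, while BDW uses a plain
 * MMIO write followed by a posting read.
 */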
4806 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
4807 {
4808         if (IS_HASWELL(dev_priv)) {
4809                 if (sandybridge_pcode_write(dev_priv,
4810                                             GEN6_PCODE_WRITE_D_COMP, val))
4811                         drm_dbg_kms(&dev_priv->drm,
4812                                     "Failed to write to D_COMP\n");
4813         } else {
4814                 intel_de_write(dev_priv, D_COMP_BDW, val);
4815                 intel_de_posting_read(dev_priv, D_COMP_BDW);
4816         }
4817 }
4818
4819 /*
4820  * This function implements pieces of two sequences from BSpec:
4821  * - Sequence for display software to disable LCPLL
4822  * - Sequence for display software to allow package C8+
4823  * The steps implemented here are just the steps that actually touch the LCPLL
4824  * register. Callers should take care of disabling all the display engine
4825  * functions, doing the mode unset, fixing interrupts, etc.
4826  */
4827 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
4828                               bool switch_to_fclk, bool allow_power_down)
4829 {
4830         u32 val;
4831
4832         assert_can_disable_lcpll(dev_priv);
4833
4834         val = intel_de_read(dev_priv, LCPLL_CTL);
4835
4836         if (switch_to_fclk) {
4837                 val |= LCPLL_CD_SOURCE_FCLK;
4838                 intel_de_write(dev_priv, LCPLL_CTL, val);
4839
4840                 if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
4841                                 LCPLL_CD_SOURCE_FCLK_DONE, 1))
4842                         drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
4843
4844                 val = intel_de_read(dev_priv, LCPLL_CTL);
4845         }
4846
4847         val |= LCPLL_PLL_DISABLE;
4848         intel_de_write(dev_priv, LCPLL_CTL, val);
4849         intel_de_posting_read(dev_priv, LCPLL_CTL);
4850
4851         if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
4852                 drm_err(&dev_priv->drm, "LCPLL still locked\n");
4853
4854         val = hsw_read_dcomp(dev_priv);
4855         val |= D_COMP_COMP_DISABLE;
4856         hsw_write_dcomp(dev_priv, val);
4857         ndelay(100);
4858
4859         if (wait_for((hsw_read_dcomp(dev_priv) &
4860                       D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
4861                 drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
4862
4863         if (allow_power_down) {
4864                 val = intel_de_read(dev_priv, LCPLL_CTL);
4865                 val |= LCPLL_POWER_DOWN_ALLOW;
4866                 intel_de_write(dev_priv, LCPLL_CTL, val);
4867                 intel_de_posting_read(dev_priv, LCPLL_CTL);
4868         }
4869 }
4870
4871 /*
4872  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
4873  * source.
4874  */
4875 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
4876 {
4877         u32 val;
4878
4879         val = intel_de_read(dev_priv, LCPLL_CTL);
4880
4881         if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
4882                     LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
4883                 return;
4884
4885         /*
4886          * Make sure we're not on PC8 state before disabling PC8, otherwise
4887          * we'll hang the machine. To prevent PC8 state, just enable force_wake.
4888          */
4889         intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
4890
4891         if (val & LCPLL_POWER_DOWN_ALLOW) {
4892                 val &= ~LCPLL_POWER_DOWN_ALLOW;
4893                 intel_de_write(dev_priv, LCPLL_CTL, val);
4894                 intel_de_posting_read(dev_priv, LCPLL_CTL);
4895         }
4896
4897         val = hsw_read_dcomp(dev_priv);
4898         val |= D_COMP_COMP_FORCE;
4899         val &= ~D_COMP_COMP_DISABLE;
4900         hsw_write_dcomp(dev_priv, val);
4901
4902         val = intel_de_read(dev_priv, LCPLL_CTL);
4903         val &= ~LCPLL_PLL_DISABLE;
4904         intel_de_write(dev_priv, LCPLL_CTL, val);
4905
4906         if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
4907                 drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
4908
4909         if (val & LCPLL_CD_SOURCE_FCLK) {
4910                 val = intel_de_read(dev_priv, LCPLL_CTL);
4911                 val &= ~LCPLL_CD_SOURCE_FCLK;
4912                 intel_de_write(dev_priv, LCPLL_CTL, val);
4913
4914                 if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
4915                                  LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
4916                         drm_err(&dev_priv->drm,
4917                                 "Switching back to LCPLL failed\n");
4918         }
4919
4920         intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
4921
4922         intel_update_cdclk(dev_priv);
4923         intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
4924 }
4925
4926 /*
4927  * Package states C8 and deeper are really deep PC states that can only be
4928  * reached when all the devices on the system allow it, so even if the graphics
4929  * device allows PC8+, it doesn't mean the system will actually get to these
4930  * states. Our driver only allows PC8+ when going into runtime PM.
4931  *
4932  * The requirements for PC8+ are that all the outputs are disabled, the power
4933  * well is disabled and most interrupts are disabled, and these are also
4934  * requirements for runtime PM. When these conditions are met, we manually take
4935  * care of the rest: disable the interrupts and clocks and switch the LCPLL
4936  * refclk to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can
4937  * hard hang the machine.
4938  *
4939  * When we really reach PC8 or deeper states (not just when we allow it) we lose
4940  * the state of some registers, so when we come back from PC8+ we need to
4941  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
4942  * need to take care of the registers kept by RC6. Notice that this happens even
4943  * if we don't put the device in PCI D3 state (which is what currently happens
4944  * because of the runtime PM support).
4945  *
4946  * For more, read "Display Sequences for Package C8" on the hardware
4947  * documentation.
4948  */
4949 static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
4950 {
4951         u32 val;
4952
4953         drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
4954
4955         if (HAS_PCH_LPT_LP(dev_priv)) {
4956                 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
4957                 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4958                 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
4959         }
4960
4961         lpt_disable_clkout_dp(dev_priv);
4962         hsw_disable_lcpll(dev_priv, true, true);
4963 }
4964
4965 static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
4966 {
4967         u32 val;
4968
4969         drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
4970
4971         hsw_restore_lcpll(dev_priv);
4972         intel_init_pch_refclk(dev_priv);
4973
4974         if (HAS_PCH_LPT_LP(dev_priv)) {
4975                 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
4976                 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
4977                 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
4978         }
4979 }
4980
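/*
 * Enable or disable the PCH reset handshake: IVB uses GEN7_MSG_CTL, other
 * platforms the RESET_PCH_HANDSHAKE_ENABLE bit in HSW_NDE_RSTWRN_OPT.
 */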
4981 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
4982                                       bool enable)
4983 {
4984         i915_reg_t reg;
4985         u32 reset_bits, val;
4986
4987         if (IS_IVYBRIDGE(dev_priv)) {
4988                 reg = GEN7_MSG_CTL;
4989                 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
4990         } else {
4991                 reg = HSW_NDE_RSTWRN_OPT;
4992                 reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
4993         }
4994
4995         val = intel_de_read(dev_priv, reg);
4996
4997         if (enable)
4998                 val |= reset_bits;
4999         else
5000                 val &= ~reset_bits;
5001
5002         intel_de_write(dev_priv, reg, val);
5003 }
5004
5005 static void skl_display_core_init(struct drm_i915_private *dev_priv,
5006                                   bool resume)
5007 {
5008         struct i915_power_domains *power_domains = &dev_priv->power_domains;
5009         struct i915_power_well *well;
5010
5011         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5012
5013         /* enable PCH reset handshake */
5014         intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5015
5016         /* enable PG1 and Misc I/O */
5017         mutex_lock(&power_domains->lock);
5018
5019         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5020         intel_power_well_enable(dev_priv, well);
5021
5022         well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
5023         intel_power_well_enable(dev_priv, well);
5024
5025         mutex_unlock(&power_domains->lock);
5026
5027         intel_cdclk_init_hw(dev_priv);
5028
5029         gen9_dbuf_enable(dev_priv);
5030
5031         if (resume && dev_priv->csr.dmc_payload)
5032                 intel_csr_load_program(dev_priv);
5033 }
5034
5035 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
5036 {
5037         struct i915_power_domains *power_domains = &dev_priv->power_domains;
5038         struct i915_power_well *well;
5039
5040         gen9_disable_dc_states(dev_priv);
5041
5042         gen9_dbuf_disable(dev_priv);
5043
5044         intel_cdclk_uninit_hw(dev_priv);
5045
5046         /* The spec doesn't call for removing the reset handshake flag */
5047         /* disable PG1 and Misc I/O */
5048
5049         mutex_lock(&power_domains->lock);
5050
5051         /*
5052          * BSpec says to keep the MISC IO power well enabled here, only
5053          * remove our request for power well 1.
5054          * Note that even though the driver's request is removed, power well 1
5055          * may stay enabled after this due to DMC's own request on it.
5056          */
5057         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5058         intel_power_well_disable(dev_priv, well);
5059
5060         mutex_unlock(&power_domains->lock);
5061
5062         usleep_range(10, 30);           /* 10 us delay per Bspec */
5063 }
5064
5065 static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
5066 {
5067         struct i915_power_domains *power_domains = &dev_priv->power_domains;
5068         struct i915_power_well *well;
5069
5070         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5071
5072         /*
5073          * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT,
5074          * or else the reset will hang because there is no PCH to respond.
5075          * Move the handshake programming to the initialization sequence;
5076          * previously it was left up to the BIOS.
5077          */
5078         intel_pch_reset_handshake(dev_priv, false);
5079
5080         /* Enable PG1 */
5081         mutex_lock(&power_domains->lock);
5082
5083         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5084         intel_power_well_enable(dev_priv, well);
5085
5086         mutex_unlock(&power_domains->lock);
5087
5088         intel_cdclk_init_hw(dev_priv);
5089
5090         gen9_dbuf_enable(dev_priv);
5091
5092         if (resume && dev_priv->csr.dmc_payload)
5093                 intel_csr_load_program(dev_priv);
5094 }
5095
5096 static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
5097 {
5098         struct i915_power_domains *power_domains = &dev_priv->power_domains;
5099         struct i915_power_well *well;
5100
5101         gen9_disable_dc_states(dev_priv);
5102
5103         gen9_dbuf_disable(dev_priv);
5104
5105         intel_cdclk_uninit_hw(dev_priv);
5106
5107         /* The spec doesn't call for removing the reset handshake flag */
5108
5109         /*
5110          * Disable PW1 (PG1).
5111          * Note that even though the driver's request is removed, power well 1
5112          * may stay enabled after this due to DMC's own request on it.
5113          */
5114         mutex_lock(&power_domains->lock);
5115
5116         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5117         intel_power_well_disable(dev_priv, well);
5118
5119         mutex_unlock(&power_domains->lock);
5120
5121         usleep_range(10, 30);           /* 10 us delay per Bspec */
5122 }
5123
5124 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
5125 {
5126         struct i915_power_domains *power_domains = &dev_priv->power_domains;
5127         struct i915_power_well *well;
5128
5129         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5130
5131         /* 1. Enable PCH Reset Handshake */
5132         intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5133
5134         /* 2-3. */
5135         intel_combo_phy_init(dev_priv);
5136
5137         /*
5138          * 4. Enable Power Well 1 (PG1).
5139          *    The AUX IO power wells will be enabled on demand.
5140          */
5141         mutex_lock(&power_domains->lock);
5142         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5143         intel_power_well_enable(dev_priv, well);
5144         mutex_unlock(&power_domains->lock);
5145
5146         /* 5. Enable CD clock */
5147         intel_cdclk_init_hw(dev_priv);
5148
5149         /* 6. Enable DBUF */
5150         gen9_dbuf_enable(dev_priv);
5151
5152         if (resume && dev_priv->csr.dmc_payload)
5153                 intel_csr_load_program(dev_priv);
5154 }
5155
5156 static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
5157 {
5158         struct i915_power_domains *power_domains = &dev_priv->power_domains;
5159         struct i915_power_well *well;
5160
5161         gen9_disable_dc_states(dev_priv);
5162
5163         /* 1. Disable all display engine functions -> already done */
5164
5165         /* 2. Disable DBUF */
5166         gen9_dbuf_disable(dev_priv);
5167
5168         /* 3. Disable CD clock */
5169         intel_cdclk_uninit_hw(dev_priv);
5170
5171         /*
5172          * 4. Disable Power Well 1 (PG1).
5173          *    The AUX IO power wells are toggled on demand, so they are already
5174          *    disabled at this point.
5175          */
5176         mutex_lock(&power_domains->lock);
5177         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5178         intel_power_well_disable(dev_priv, well);
5179         mutex_unlock(&power_domains->lock);
5180
5181         usleep_range(10, 30);           /* 10 us delay per Bspec */
5182
5183         /* 5. */
5184         intel_combo_phy_uninit(dev_priv);
5185 }
5186
5187 struct buddy_page_mask {
5188         u32 page_mask;
5189         u8 type;
5190         u8 num_channels;
5191 };
5192
5193 static const struct buddy_page_mask tgl_buddy_page_masks[] = {
5194         { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0xE },
5195         { .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
5196         { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
5197         { .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
5198         {}
5199 };
5200
5201 static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
5202         { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
5203         { .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
5204         { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
5205         { .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
5206         {}
5207 };
5208
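/*
 * Pick the BW_BUDDY page mask matching the detected DRAM type and channel
 * count; unknown configurations disable the buddy logic entirely. TGL A0/B0
 * steppings use the Wa_1409767108 table instead.
 */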
5209 static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
5210 {
5211         enum intel_dram_type type = dev_priv->dram_info.type;
5212         u8 num_channels = dev_priv->dram_info.num_channels;
5213         const struct buddy_page_mask *table;
5214         int i;
5215
5216         if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
5217                 /* Wa_1409767108: tgl */
5218                 table = wa_1409767108_buddy_page_masks;
5219         else
5220                 table = tgl_buddy_page_masks;
5221
5222         for (i = 0; table[i].page_mask != 0; i++)
5223                 if (table[i].num_channels == num_channels &&
5224                     table[i].type == type)
5225                         break;
5226
5227         if (table[i].page_mask == 0) {
5228                 drm_dbg(&dev_priv->drm,
5229                         "Unknown memory configuration; disabling address buddy logic.\n");
5230                 intel_de_write(dev_priv, BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
5231                 intel_de_write(dev_priv, BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
5232         } else {
5233                 intel_de_write(dev_priv, BW_BUDDY1_PAGE_MASK,
5234                                table[i].page_mask);
5235                 intel_de_write(dev_priv, BW_BUDDY2_PAGE_MASK,
5236                                table[i].page_mask);
5237
5238                 /* Wa_22010178259:tgl */
5239                 intel_de_rmw(dev_priv, BW_BUDDY1_CTL,
5240                              BW_BUDDY_TLB_REQ_TIMER_MASK,
5241                              REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
5242                 intel_de_rmw(dev_priv, BW_BUDDY2_CTL,
5243                              BW_BUDDY_TLB_REQ_TIMER_MASK,
5244                              REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
5245         }
5246 }
5247
5248 static void icl_display_core_init(struct drm_i915_private *dev_priv,
5249                                   bool resume)
5250 {
5251         struct i915_power_domains *power_domains = &dev_priv->power_domains;
5252         struct i915_power_well *well;
5253
5254         gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5255
5256         /* 1. Enable PCH reset handshake. */
5257         intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5258
5259         /* 2. Initialize all combo phys */
5260         intel_combo_phy_init(dev_priv);
5261
5262         /*
5263          * 3. Enable Power Well 1 (PG1).
5264          *    The AUX IO power wells will be enabled on demand.
5265          */
5266         mutex_lock(&power_domains->lock);
5267         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5268         intel_power_well_enable(dev_priv, well);
5269         mutex_unlock(&power_domains->lock);
5270
5271         /* 4. Enable CDCLK. */
5272         intel_cdclk_init_hw(dev_priv);
5273
5274         /* 5. Enable DBUF. */
5275         icl_dbuf_enable(dev_priv);
5276
5277         /* 6. Setup MBUS. */
5278         icl_mbus_init(dev_priv);
5279
5280         /* 7. Program arbiter BW_BUDDY registers */
5281         if (INTEL_GEN(dev_priv) >= 12)
5282                 tgl_bw_buddy_init(dev_priv);
5283
5284         if (resume && dev_priv->csr.dmc_payload)
5285                 intel_csr_load_program(dev_priv);
5286 }
5287
5288 static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
5289 {
5290         struct i915_power_domains *power_domains = &dev_priv->power_domains;
5291         struct i915_power_well *well;
5292
5293         gen9_disable_dc_states(dev_priv);
5294
5295         /* 1. Disable all display engine functions -> already done */
5296
5297         /* 2. Disable DBUF */
5298         icl_dbuf_disable(dev_priv);
5299
5300         /* 3. Disable CD clock */
5301         intel_cdclk_uninit_hw(dev_priv);
5302
5303         /*
5304          * 4. Disable Power Well 1 (PG1).
5305          *    The AUX IO power wells are toggled on demand, so they are already
5306          *    disabled at this point.
5307          */
5308         mutex_lock(&power_domains->lock);
5309         well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5310         intel_power_well_disable(dev_priv, well);
5311         mutex_unlock(&power_domains->lock);
5312
5313         /* 5. */
5314         intel_combo_phy_uninit(dev_priv);
5315 }
5316
5317 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
5318 {
5319         struct i915_power_well *cmn_bc =
5320                 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5321         struct i915_power_well *cmn_d =
5322                 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
5323
5324         /*
5325          * DISPLAY_PHY_CONTROL can get corrupted if read. As a
5326          * workaround never ever read DISPLAY_PHY_CONTROL, and
5327          * instead maintain a shadow copy ourselves. Use the actual
5328          * power well state and lane status to reconstruct the
5329          * expected initial value.
5330          */
5331         dev_priv->chv_phy_control =
5332                 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
5333                 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
5334                 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
5335                 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
5336                 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
5337
5338         /*
5339          * If all lanes are disabled we leave the override disabled
5340          * with all power down bits cleared to match the state we
5341          * would use after disabling the port. Otherwise enable the
5342          * override and set the lane powerdown bits according to the
5343          * current lane status.
5344          */
5345         if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
5346                 u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
5347                 unsigned int mask;
5348
5349                 mask = status & DPLL_PORTB_READY_MASK;
5350                 if (mask == 0xf)
5351                         mask = 0x0;
5352                 else
5353                         dev_priv->chv_phy_control |=
5354                                 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
5355
5356                 dev_priv->chv_phy_control |=
5357                         PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
5358
5359                 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
5360                 if (mask == 0xf)
5361                         mask = 0x0;
5362                 else
5363                         dev_priv->chv_phy_control |=
5364                                 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
5365
5366                 dev_priv->chv_phy_control |=
5367                         PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
5368
5369                 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
5370
5371                 dev_priv->chv_phy_assert[DPIO_PHY0] = false;
5372         } else {
5373                 dev_priv->chv_phy_assert[DPIO_PHY0] = true;
5374         }
5375
5376         if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
5377                 u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
5378                 unsigned int mask;
5379
5380                 mask = status & DPLL_PORTD_READY_MASK;
5381
5382                 if (mask == 0xf)
5383                         mask = 0x0;
5384                 else
5385                         dev_priv->chv_phy_control |=
5386                                 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
5387
5388                 dev_priv->chv_phy_control |=
5389                         PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
5390
5391                 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
5392
5393                 dev_priv->chv_phy_assert[DPIO_PHY1] = false;
5394         } else {
5395                 dev_priv->chv_phy_assert[DPIO_PHY1] = true;
5396         }
5397
5398         drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
5399                     dev_priv->chv_phy_control);
5400
5401         /* Defer application of initial phy_control to enabling the powerwell */
5402 }
5403
5404 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
5405 {
5406         struct i915_power_well *cmn =
5407                 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5408         struct i915_power_well *disp2d =
5409                 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
5410
5411         /* If the display might already be active, skip this */
5412         if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
5413             disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
5414             intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
5415                 return;
5416
5417         drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
5418
5419         /* cmnlane needs DPLL registers */
5420         disp2d->desc->ops->enable(dev_priv, disp2d);
5421
5422         /*
5423          * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
5424          * Need to assert and de-assert PHY SB reset by gating the
5425          * common lane power, then un-gating it.
5426          * Simply ungating isn't sufficient to reset the PHY far enough
5427          * to get the ports and lanes running.
5428          */
5429         cmn->desc->ops->disable(dev_priv, cmn);
5430 }
5431
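     /*
      * Query the Punit over its sideband interface and report whether the
      * power island behind the given SSPM0 register is power gated, i.e.
      * whether its SSC field reads back as SSPM0_SSC_PWR_GATE. This backs
      * the VED/ISP power-gating assertions below.
      */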
5432 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
5433 {
5434         bool ret;
5435
5436         vlv_punit_get(dev_priv);
5437         ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
5438         vlv_punit_put(dev_priv);
5439
5440         return ret;
5441 }
5442
5443 static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
5444 {
5445         drm_WARN(&dev_priv->drm,
5446                  !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
5447                  "VED not power gated\n");
5448 }
5449
5450 static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
5451 {
5452         static const struct pci_device_id isp_ids[] = {
5453                 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
5454                 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
5455                 {}
5456         };
5457
5458         drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
5459                  !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
5460                  "ISP not power gated\n");
5461 }
5462
5463 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
5464
5465 /**
5466  * intel_power_domains_init_hw - initialize hardware power domain state
5467  * @i915: i915 device instance
5468  * @resume: true if called from a resume code path
5469  *
5470  * This function initializes the hardware power domain state and enables all
5471  * power wells belonging to the INIT power domain. Power wells in other
5472  * domains (and not in the INIT domain) are referenced or disabled by
5473  * intel_modeset_readout_hw_state(). After that the reference count of each
5474  * power well must match its HW enabled state, see
5475  * intel_power_domains_verify_state().
5476  *
5477  * It will return with power domains disabled (to be enabled later by
5478  * intel_power_domains_enable()) and must be paired with
5479  * intel_power_domains_driver_remove().
5480  */
5481 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
5482 {
5483         struct i915_power_domains *power_domains = &i915->power_domains;
5484
5485         power_domains->initializing = true;
5486
5487         if (INTEL_GEN(i915) >= 11) {
5488                 icl_display_core_init(i915, resume);
5489         } else if (IS_CANNONLAKE(i915)) {
5490                 cnl_display_core_init(i915, resume);
5491         } else if (IS_GEN9_BC(i915)) {
5492                 skl_display_core_init(i915, resume);
5493         } else if (IS_GEN9_LP(i915)) {
5494                 bxt_display_core_init(i915, resume);
5495         } else if (IS_CHERRYVIEW(i915)) {
5496                 mutex_lock(&power_domains->lock);
5497                 chv_phy_control_init(i915);
5498                 mutex_unlock(&power_domains->lock);
5499                 assert_isp_power_gated(i915);
5500         } else if (IS_VALLEYVIEW(i915)) {
5501                 mutex_lock(&power_domains->lock);
5502                 vlv_cmnlane_wa(i915);
5503                 mutex_unlock(&power_domains->lock);
5504                 assert_ved_power_gated(i915);
5505                 assert_isp_power_gated(i915);
5506         } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
5507                 hsw_assert_cdclk(i915);
5508                 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5509         } else if (IS_IVYBRIDGE(i915)) {
5510                 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5511         }
5512
5513         /*
5514          * Keep all power wells enabled for any dependent HW access during
5515          * initialization and to make sure we keep BIOS-enabled display HW
5516          * resources powered until display HW readout is complete. We drop
5517          * this reference in intel_power_domains_enable().
5518          */
5519         power_domains->wakeref =
5520                 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5521
5522         /* Disable dynamic power well support if the user asked so. */
5523         if (!i915_modparams.disable_power_well)
5524                 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5525         intel_power_domains_sync_hw(i915);
5526
5527         power_domains->initializing = false;
5528 }
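
     /*
      * A minimal usage sketch, based only on the kerneldoc in this file, of
      * how the load/unload entry points are meant to pair up. The display HW
      * readout step and all error handling are elided, and the call sites
      * shown here are illustrative rather than copied from the driver:
      *
      *        // driver load
      *        intel_power_domains_init_hw(i915, false);
      *        // ... display HW readout takes its power references here ...
      *        intel_power_domains_enable(i915);
      *
      *        // driver unload
      *        intel_power_domains_disable(i915);
      *        intel_power_domains_driver_remove(i915);
      */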
5529
5530 /**
5531  * intel_power_domains_driver_remove - deinitialize hw power domain state
5532  * @i915: i915 device instance
5533  *
5534  * De-initializes the display power domain HW state. It also ensures that the
5535  * device stays powered up so that the driver can be reloaded.
5536  *
5537  * It must be called with power domains already disabled (after a call to
5538  * intel_power_domains_disable()) and must be paired with
5539  * intel_power_domains_init_hw().
5540  */
5541 void intel_power_domains_driver_remove(struct drm_i915_private *i915)
5542 {
5543         intel_wakeref_t wakeref __maybe_unused =
5544                 fetch_and_zero(&i915->power_domains.wakeref);
5545
5546         /* Remove the refcount we took to keep power well support disabled. */
5547         if (!i915_modparams.disable_power_well)
5548                 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
5549
5550         intel_display_power_flush_work_sync(i915);
5551
5552         intel_power_domains_verify_state(i915);
5553
5554         /* Keep the power well enabled, but cancel its rpm wakeref. */
5555         intel_runtime_pm_put(&i915->runtime_pm, wakeref);
5556 }
5557
5558 /**
5559  * intel_power_domains_enable - enable toggling of display power wells
5560  * @i915: i915 device instance
5561  *
5562  * Enable the on-demand enabling/disabling of the display power wells. Note
5563  * that power wells not belonging to POWER_DOMAIN_INIT are allowed to be
5564  * toggled only at specific points of the display modeset sequence, thus they
5565  * are not affected by the intel_power_domains_enable()/disable() calls. The
5566  * purpose of these functions is to keep the rest of the power wells enabled
5567  * until the end of display HW readout (which will acquire the power
5568  * references reflecting the current HW state).
5569  */
5570 void intel_power_domains_enable(struct drm_i915_private *i915)
5571 {
5572         intel_wakeref_t wakeref __maybe_unused =
5573                 fetch_and_zero(&i915->power_domains.wakeref);
5574
5575         intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5576         intel_power_domains_verify_state(i915);
5577 }
5578
5579 /**
5580  * intel_power_domains_disable - disable toggling of display power wells
5581  * @i915: i915 device instance
5582  *
5583  * Disable the on-demand enabling/disabling of the display power wells. See
5584  * intel_power_domains_enable() for which power wells this call controls.
5585  */
5586 void intel_power_domains_disable(struct drm_i915_private *i915)
5587 {
5588         struct i915_power_domains *power_domains = &i915->power_domains;
5589
5590         drm_WARN_ON(&i915->drm, power_domains->wakeref);
5591         power_domains->wakeref =
5592                 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5593
5594         intel_power_domains_verify_state(i915);
5595 }
5596
5597 /**
5598  * intel_power_domains_suspend - suspend power domain state
5599  * @i915: i915 device instance
5600  * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
5601  *
5602  * This function prepares the hardware power domain state before entering
5603  * system suspend.
5604  *
5605  * It must be called with power domains already disabled (after a call to
5606  * intel_power_domains_disable()) and paired with intel_power_domains_resume().
5607  */
5608 void intel_power_domains_suspend(struct drm_i915_private *i915,
5609                                  enum i915_drm_suspend_mode suspend_mode)
5610 {
5611         struct i915_power_domains *power_domains = &i915->power_domains;
5612         intel_wakeref_t wakeref __maybe_unused =
5613                 fetch_and_zero(&power_domains->wakeref);
5614
5615         intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5616
5617         /*
5618          * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
5619          * support, don't manually deinit the power domains. This also means the
5620          * CSR/DMC firmware will stay active; it will power down any HW
5621          * resources as required and also enable deeper system power states
5622          * that would be blocked if the firmware was inactive.
5623          */
5624         if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
5625             suspend_mode == I915_DRM_SUSPEND_IDLE &&
5626             i915->csr.dmc_payload) {
5627                 intel_display_power_flush_work(i915);
5628                 intel_power_domains_verify_state(i915);
5629                 return;
5630         }
5631
5632         /*
5633          * Even if power well support was disabled we still want to disable
5634          * power wells if power domains must be deinitialized for suspend.
5635          */
5636         if (!i915_modparams.disable_power_well)
5637                 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
5638
5639         intel_display_power_flush_work(i915);
5640         intel_power_domains_verify_state(i915);
5641
5642         if (INTEL_GEN(i915) >= 11)
5643                 icl_display_core_uninit(i915);
5644         else if (IS_CANNONLAKE(i915))
5645                 cnl_display_core_uninit(i915);
5646         else if (IS_GEN9_BC(i915))
5647                 skl_display_core_uninit(i915);
5648         else if (IS_GEN9_LP(i915))
5649                 bxt_display_core_uninit(i915);
5650
5651         power_domains->display_core_suspended = true;
5652 }
5653
5654 /**
5655  * intel_power_domains_resume - resume power domain state
5656  * @i915: i915 device instance
5657  *
5658  * This function resumes the hardware power domain state during system resume.
5659  *
5660  * It will return with power domain support disabled (to be enabled later by
5661  * intel_power_domains_enable()) and must be paired with
5662  * intel_power_domains_suspend().
5663  */
5664 void intel_power_domains_resume(struct drm_i915_private *i915)
5665 {
5666         struct i915_power_domains *power_domains = &i915->power_domains;
5667
5668         if (power_domains->display_core_suspended) {
5669                 intel_power_domains_init_hw(i915, true);
5670                 power_domains->display_core_suspended = false;
5671         } else {
5672                 drm_WARN_ON(&i915->drm, power_domains->wakeref);
5673                 power_domains->wakeref =
5674                         intel_display_power_get(i915, POWER_DOMAIN_INIT);
5675         }
5676
5677         intel_power_domains_verify_state(i915);
5678 }
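
     /*
      * A minimal usage sketch, based only on the kerneldoc of the
      * suspend/resume helpers above, of the intended ordering across a
      * system suspend/resume cycle. The suspend mode is only an example
      * value, and everything else the suspend path does is elided:
      *
      *        // system suspend
      *        intel_power_domains_disable(i915);
      *        intel_power_domains_suspend(i915, I915_DRM_SUSPEND_MEM);
      *
      *        // system resume
      *        intel_power_domains_resume(i915);
      *        intel_power_domains_enable(i915);
      */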
5679
5680 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
5681
5682 static void intel_power_domains_dump_info(struct drm_i915_private *i915)
5683 {
5684         struct i915_power_domains *power_domains = &i915->power_domains;
5685         struct i915_power_well *power_well;
5686
5687         for_each_power_well(i915, power_well) {
5688                 enum intel_display_power_domain domain;
5689
5690                 drm_dbg(&i915->drm, "%-25s %d\n",
5691                         power_well->desc->name, power_well->count);
5692
5693                 for_each_power_domain(domain, power_well->desc->domains)
5694                         drm_dbg(&i915->drm, "  %-23s %d\n",
5695                                 intel_display_power_domain_str(domain),
5696                                 power_domains->domain_use_count[domain]);
5697         }
5698 }
5699
5700 /**
5701  * intel_power_domains_verify_state - verify the HW/SW state for all power wells
5702  * @i915: i915 device instance
5703  *
5704  * Verify that the reference count of each power well matches its HW enabled
5705  * state and the total refcount of the domains it belongs to. This must be
5706  * called after modeset HW state sanitization, which is responsible for
5707  * acquiring reference counts for any power wells in use and disabling the
5708  * ones left on by BIOS but not required by any active output.
5709  */
5710 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
5711 {
5712         struct i915_power_domains *power_domains = &i915->power_domains;
5713         struct i915_power_well *power_well;
5714         bool dump_domain_info;
5715
5716         mutex_lock(&power_domains->lock);
5717
5718         verify_async_put_domains_state(power_domains);
5719
5720         dump_domain_info = false;
5721         for_each_power_well(i915, power_well) {
5722                 enum intel_display_power_domain domain;
5723                 int domains_count;
5724                 bool enabled;
5725
5726                 enabled = power_well->desc->ops->is_enabled(i915, power_well);
5727                 if ((power_well->count || power_well->desc->always_on) !=
5728                     enabled)
5729                         drm_err(&i915->drm,
5730                                 "power well %s state mismatch (refcount %d/enabled %d)\n",
5731                                 power_well->desc->name,
5732                                 power_well->count, enabled);
5733
5734                 domains_count = 0;
5735                 for_each_power_domain(domain, power_well->desc->domains)
5736                         domains_count += power_domains->domain_use_count[domain];
5737
5738                 if (power_well->count != domains_count) {
5739                         drm_err(&i915->drm,
5740                                 "power well %s refcount/domain refcount mismatch "
5741                                 "(refcount %d/domains refcount %d)\n",
5742                                 power_well->desc->name, power_well->count,
5743                                 domains_count);
5744                         dump_domain_info = true;
5745                 }
5746         }
5747
5748         if (dump_domain_info) {
5749                 static bool dumped;
5750
5751                 if (!dumped) {
5752                         intel_power_domains_dump_info(i915);
5753                         dumped = true;
5754                 }
5755         }
5756
5757         mutex_unlock(&power_domains->lock);
5758 }
5759
5760 #else
5761
5762 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
5763 {
5764 }
5765
5766 #endif
5767
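     /*
      * intel_display_power_suspend_late() and intel_display_power_resume_early()
      * enter/exit the deepest display low-power state without tearing down the
      * display core: DC9 on gen9 LP and gen11+, package C8 on HSW/BDW.
      */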
5768 void intel_display_power_suspend_late(struct drm_i915_private *i915)
5769 {
5770         if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915))
5771                 bxt_enable_dc9(i915);
5772         else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
5773                 hsw_enable_pc8(i915);
5774 }
5775
5776 void intel_display_power_resume_early(struct drm_i915_private *i915)
5777 {
5778         if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
5779                 gen9_sanitize_dc_state(i915);
5780                 bxt_disable_dc9(i915);
5781         } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5782                 hsw_disable_pc8(i915);
5783         }
5784 }
5785
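     /*
      * intel_display_power_suspend() and intel_display_power_resume() do the
      * same, but additionally uninit/re-init the display core on gen9 LP and
      * gen11+, re-enabling DC5/DC6 on resume when a DMC firmware payload is
      * loaded.
      */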
5786 void intel_display_power_suspend(struct drm_i915_private *i915)
5787 {
5788         if (INTEL_GEN(i915) >= 11) {
5789                 icl_display_core_uninit(i915);
5790                 bxt_enable_dc9(i915);
5791         } else if (IS_GEN9_LP(i915)) {
5792                 bxt_display_core_uninit(i915);
5793                 bxt_enable_dc9(i915);
5794         } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5795                 hsw_enable_pc8(i915);
5796         }
5797 }
5798
5799 void intel_display_power_resume(struct drm_i915_private *i915)
5800 {
5801         if (INTEL_GEN(i915) >= 11) {
5802                 bxt_disable_dc9(i915);
5803                 icl_display_core_init(i915, true);
5804                 if (i915->csr.dmc_payload) {
5805                         if (i915->csr.allowed_dc_mask &
5806                             DC_STATE_EN_UPTO_DC6)
5807                                 skl_enable_dc6(i915);
5808                         else if (i915->csr.allowed_dc_mask &
5809                                  DC_STATE_EN_UPTO_DC5)
5810                                 gen9_enable_dc5(i915);
5811                 }
5812         } else if (IS_GEN9_LP(i915)) {
5813                 bxt_disable_dc9(i915);
5814                 bxt_display_core_init(i915, true);
5815                 if (i915->csr.dmc_payload &&
5816                     (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
5817                         gen9_enable_dc5(i915);
5818         } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5819                 hsw_disable_pc8(i915);
5820         }
5821 }