/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */
6 #include "display/intel_crt.h"
7 #include "display/intel_dp.h"
11 #include "intel_cdclk.h"
12 #include "intel_combo_phy.h"
13 #include "intel_csr.h"
14 #include "intel_display_power.h"
15 #include "intel_display_types.h"
16 #include "intel_dpio_phy.h"
17 #include "intel_hotplug.h"
19 #include "intel_sideband.h"
21 #include "intel_vga.h"
23 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
24 enum i915_power_well_id power_well_id);
27 intel_display_power_domain_str(enum intel_display_power_domain domain)
30 case POWER_DOMAIN_DISPLAY_CORE:
31 return "DISPLAY_CORE";
32 case POWER_DOMAIN_PIPE_A:
34 case POWER_DOMAIN_PIPE_B:
36 case POWER_DOMAIN_PIPE_C:
38 case POWER_DOMAIN_PIPE_D:
40 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
41 return "PIPE_A_PANEL_FITTER";
42 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
43 return "PIPE_B_PANEL_FITTER";
44 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
45 return "PIPE_C_PANEL_FITTER";
46 case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
47 return "PIPE_D_PANEL_FITTER";
48 case POWER_DOMAIN_TRANSCODER_A:
49 return "TRANSCODER_A";
50 case POWER_DOMAIN_TRANSCODER_B:
51 return "TRANSCODER_B";
52 case POWER_DOMAIN_TRANSCODER_C:
53 return "TRANSCODER_C";
54 case POWER_DOMAIN_TRANSCODER_D:
55 return "TRANSCODER_D";
56 case POWER_DOMAIN_TRANSCODER_EDP:
57 return "TRANSCODER_EDP";
58 case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
59 return "TRANSCODER_VDSC_PW2";
60 case POWER_DOMAIN_TRANSCODER_DSI_A:
61 return "TRANSCODER_DSI_A";
62 case POWER_DOMAIN_TRANSCODER_DSI_C:
63 return "TRANSCODER_DSI_C";
64 case POWER_DOMAIN_PORT_DDI_A_LANES:
65 return "PORT_DDI_A_LANES";
66 case POWER_DOMAIN_PORT_DDI_B_LANES:
67 return "PORT_DDI_B_LANES";
68 case POWER_DOMAIN_PORT_DDI_C_LANES:
69 return "PORT_DDI_C_LANES";
70 case POWER_DOMAIN_PORT_DDI_D_LANES:
71 return "PORT_DDI_D_LANES";
72 case POWER_DOMAIN_PORT_DDI_E_LANES:
73 return "PORT_DDI_E_LANES";
74 case POWER_DOMAIN_PORT_DDI_F_LANES:
75 return "PORT_DDI_F_LANES";
76 case POWER_DOMAIN_PORT_DDI_G_LANES:
77 return "PORT_DDI_G_LANES";
78 case POWER_DOMAIN_PORT_DDI_H_LANES:
79 return "PORT_DDI_H_LANES";
80 case POWER_DOMAIN_PORT_DDI_I_LANES:
81 return "PORT_DDI_I_LANES";
82 case POWER_DOMAIN_PORT_DDI_A_IO:
83 return "PORT_DDI_A_IO";
84 case POWER_DOMAIN_PORT_DDI_B_IO:
85 return "PORT_DDI_B_IO";
86 case POWER_DOMAIN_PORT_DDI_C_IO:
87 return "PORT_DDI_C_IO";
88 case POWER_DOMAIN_PORT_DDI_D_IO:
89 return "PORT_DDI_D_IO";
90 case POWER_DOMAIN_PORT_DDI_E_IO:
91 return "PORT_DDI_E_IO";
92 case POWER_DOMAIN_PORT_DDI_F_IO:
93 return "PORT_DDI_F_IO";
94 case POWER_DOMAIN_PORT_DDI_G_IO:
95 return "PORT_DDI_G_IO";
96 case POWER_DOMAIN_PORT_DDI_H_IO:
97 return "PORT_DDI_H_IO";
98 case POWER_DOMAIN_PORT_DDI_I_IO:
99 return "PORT_DDI_I_IO";
100 case POWER_DOMAIN_PORT_DSI:
102 case POWER_DOMAIN_PORT_CRT:
104 case POWER_DOMAIN_PORT_OTHER:
106 case POWER_DOMAIN_VGA:
108 case POWER_DOMAIN_AUDIO:
110 case POWER_DOMAIN_AUX_A:
112 case POWER_DOMAIN_AUX_B:
114 case POWER_DOMAIN_AUX_C:
116 case POWER_DOMAIN_AUX_D:
118 case POWER_DOMAIN_AUX_E:
120 case POWER_DOMAIN_AUX_F:
122 case POWER_DOMAIN_AUX_G:
124 case POWER_DOMAIN_AUX_H:
126 case POWER_DOMAIN_AUX_I:
128 case POWER_DOMAIN_AUX_IO_A:
130 case POWER_DOMAIN_AUX_C_TBT:
132 case POWER_DOMAIN_AUX_D_TBT:
134 case POWER_DOMAIN_AUX_E_TBT:
136 case POWER_DOMAIN_AUX_F_TBT:
138 case POWER_DOMAIN_AUX_G_TBT:
140 case POWER_DOMAIN_AUX_H_TBT:
142 case POWER_DOMAIN_AUX_I_TBT:
144 case POWER_DOMAIN_GMBUS:
146 case POWER_DOMAIN_INIT:
148 case POWER_DOMAIN_MODESET:
150 case POWER_DOMAIN_GT_IRQ:
152 case POWER_DOMAIN_DPLL_DC_OFF:
153 return "DPLL_DC_OFF";
155 MISSING_CASE(domain);
160 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
161 struct i915_power_well *power_well)
163 drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
164 power_well->desc->ops->enable(dev_priv, power_well);
165 power_well->hw_enabled = true;
168 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
169 struct i915_power_well *power_well)
171 drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
172 power_well->hw_enabled = false;
173 power_well->desc->ops->disable(dev_priv, power_well);
176 static void intel_power_well_get(struct drm_i915_private *dev_priv,
177 struct i915_power_well *power_well)
179 if (!power_well->count++)
180 intel_power_well_enable(dev_priv, power_well);
183 static void intel_power_well_put(struct drm_i915_private *dev_priv,
184 struct i915_power_well *power_well)
186 drm_WARN(&dev_priv->drm, !power_well->count,
187 "Use count on power well %s is already zero",
188 power_well->desc->name);
190 if (!--power_well->count)
191 intel_power_well_disable(dev_priv, power_well);
195 * __intel_display_power_is_enabled - unlocked check for a power domain
196 * @dev_priv: i915 device instance
197 * @domain: power domain to check
199 * This is the unlocked version of intel_display_power_is_enabled() and should
200 * only be used from error capture and recovery code where deadlocks are
204 * True when the power domain is enabled, false otherwise.
206 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
207 enum intel_display_power_domain domain)
209 struct i915_power_well *power_well;
212 if (dev_priv->runtime_pm.suspended)
217 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
218 if (power_well->desc->always_on)
221 if (!power_well->hw_enabled) {
231 * intel_display_power_is_enabled - check for a power domain
232 * @dev_priv: i915 device instance
233 * @domain: power domain to check
235 * This function can be used to check the hw power domain state. It is mostly
236 * used in hardware state readout functions. Everywhere else code should rely
237 * upon explicit power domain reference counting to ensure that the hardware
238 * block is powered up before accessing it.
240 * Callers must hold the relevant modesetting locks to ensure that concurrent
241 * threads can't disable the power well while the caller tries to read a few
245 * True when the power domain is enabled, false otherwise.
247 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
248 enum intel_display_power_domain domain)
250 struct i915_power_domains *power_domains;
253 power_domains = &dev_priv->power_domains;
255 mutex_lock(&power_domains->lock);
256 ret = __intel_display_power_is_enabled(dev_priv, domain);
257 mutex_unlock(&power_domains->lock);
263 * Starting with Haswell, we have a "Power Down Well" that can be turned off
264 * when not needed anymore. We have 4 registers that can request the power well
265 * to be enabled, and it will only be disabled if none of the registers is
266 * requesting it to be enabled.
268 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
269 u8 irq_pipe_mask, bool has_vga)
272 intel_vga_reset_io_mem(dev_priv);
275 gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
278 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
282 gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
285 static struct intel_digital_port *
286 aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
289 struct intel_digital_port *dig_port = NULL;
290 struct intel_encoder *encoder;
292 for_each_intel_encoder(&dev_priv->drm, encoder) {
293 /* We'll check the MST primary port */
294 if (encoder->type == INTEL_OUTPUT_DP_MST)
297 dig_port = enc_to_dig_port(encoder);
301 if (dig_port->aux_ch != aux_ch) {
312 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
313 struct i915_power_well *power_well)
315 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
316 int pw_idx = power_well->desc->hsw.idx;
318 /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
319 if (intel_de_wait_for_set(dev_priv, regs->driver,
320 HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
321 drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
322 power_well->desc->name);
324 /* An AUX timeout is expected if the TBT DP tunnel is down. */
325 drm_WARN_ON(&dev_priv->drm, !power_well->desc->hsw.is_tc_tbt);
329 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
330 const struct i915_power_well_regs *regs,
333 u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
336 ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
337 ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
339 ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
340 ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
345 static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
346 struct i915_power_well *power_well)
348 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
349 int pw_idx = power_well->desc->hsw.idx;
354 * Bspec doesn't require waiting for PWs to get disabled, but still do
355 * this for paranoia. The known cases where a PW will be forced on:
356 * - a KVMR request on any power well via the KVMR request register
357 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
358 * DEBUG request registers
359 * Skip the wait in case any of the request bits are set and print a
360 * diagnostic message.
362 wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
363 HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
364 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
368 drm_dbg_kms(&dev_priv->drm,
369 "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
370 power_well->desc->name,
371 !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
374 static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
375 enum skl_power_gate pg)
377 /* Timeout 5us for PG#0, for other PGs 1us */
378 drm_WARN_ON(&dev_priv->drm,
379 intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
380 SKL_FUSE_PG_DIST_STATUS(pg), 1));
383 static void hsw_power_well_enable_prepare(struct drm_i915_private *dev_priv,
384 struct i915_power_well *power_well)
386 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
387 int pw_idx = power_well->desc->hsw.idx;
390 if (power_well->desc->hsw.has_fuses) {
391 enum skl_power_gate pg;
393 pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
394 SKL_PW_CTL_IDX_TO_PG(pw_idx);
396 * For PW1 we have to wait both for the PW0/PG0 fuse state
397 * before enabling the power well and PW1/PG1's own fuse
398 * state after the enabling. For all other power wells with
399 * fuses we only have to wait for that PW/PG's fuse state
400 * after the enabling.
403 gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
406 val = intel_de_read(dev_priv, regs->driver);
407 intel_de_write(dev_priv, regs->driver,
408 val | HSW_PWR_WELL_CTL_REQ(pw_idx));
411 static void hsw_power_well_enable_complete(struct drm_i915_private *dev_priv,
412 struct i915_power_well *power_well)
414 int pw_idx = power_well->desc->hsw.idx;
416 hsw_wait_for_power_well_enable(dev_priv, power_well);
418 /* Display WA #1178: cnl */
419 if (IS_CANNONLAKE(dev_priv) &&
420 pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
421 pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
424 val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
425 val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
426 intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
429 if (power_well->desc->hsw.has_fuses) {
430 enum skl_power_gate pg;
432 pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
433 SKL_PW_CTL_IDX_TO_PG(pw_idx);
434 gen9_wait_for_power_well_fuses(dev_priv, pg);
437 hsw_power_well_post_enable(dev_priv,
438 power_well->desc->hsw.irq_pipe_mask,
439 power_well->desc->hsw.has_vga);
/* Full HSW+ power well enable: prepare (request) then complete (wait). */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_power_well_enable_prepare(dev_priv, power_well);
	hsw_power_well_enable_complete(dev_priv, power_well);
}
449 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
450 struct i915_power_well *power_well)
452 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
453 int pw_idx = power_well->desc->hsw.idx;
456 hsw_power_well_pre_disable(dev_priv,
457 power_well->desc->hsw.irq_pipe_mask);
459 val = intel_de_read(dev_priv, regs->driver);
460 intel_de_write(dev_priv, regs->driver,
461 val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
462 hsw_wait_for_power_well_disable(dev_priv, power_well);
465 #define ICL_AUX_PW_TO_PHY(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
468 icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
469 struct i915_power_well *power_well)
471 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
472 int pw_idx = power_well->desc->hsw.idx;
473 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
476 drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
478 val = intel_de_read(dev_priv, regs->driver);
479 intel_de_write(dev_priv, regs->driver,
480 val | HSW_PWR_WELL_CTL_REQ(pw_idx));
482 if (INTEL_GEN(dev_priv) < 12) {
483 val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
484 intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
485 val | ICL_LANE_ENABLE_AUX);
488 hsw_wait_for_power_well_enable(dev_priv, power_well);
490 /* Display WA #1178: icl */
491 if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
492 !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
493 val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
494 val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
495 intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
500 icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
501 struct i915_power_well *power_well)
503 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
504 int pw_idx = power_well->desc->hsw.idx;
505 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
508 drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
510 val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
511 intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
512 val & ~ICL_LANE_ENABLE_AUX);
514 val = intel_de_read(dev_priv, regs->driver);
515 intel_de_write(dev_priv, regs->driver,
516 val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
518 hsw_wait_for_power_well_disable(dev_priv, power_well);
521 #define ICL_AUX_PW_TO_CH(pw_idx) \
522 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
524 #define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
525 ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
527 static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
528 struct i915_power_well *power_well)
530 int pw_idx = power_well->desc->hsw.idx;
532 return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
533 ICL_AUX_PW_TO_CH(pw_idx);
536 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
538 static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
540 static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
541 struct i915_power_well *power_well)
543 int refs = hweight64(power_well->desc->domains &
544 async_put_domains_mask(&dev_priv->power_domains));
546 drm_WARN_ON(&dev_priv->drm, refs > power_well->count);
551 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
552 struct i915_power_well *power_well,
553 struct intel_digital_port *dig_port)
555 /* Bypass the check if all references are released asynchronously */
556 if (power_well_async_ref_count(dev_priv, power_well) ==
560 if (drm_WARN_ON(&dev_priv->drm, !dig_port))
563 drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
/* Non-debug build: TC port ref assertion compiles to a no-op. */
static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
}
576 #define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
579 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
580 struct i915_power_well *power_well)
582 enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
583 struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
586 icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
588 val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
589 val &= ~DP_AUX_CH_CTL_TBT_IO;
590 if (power_well->desc->hsw.is_tc_tbt)
591 val |= DP_AUX_CH_CTL_TBT_IO;
592 intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
594 hsw_power_well_enable_prepare(dev_priv, power_well);
596 /* TODO ICL TC cold handling */
598 hsw_power_well_enable_complete(dev_priv, power_well);
600 if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) {
601 enum tc_port tc_port;
603 tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
604 intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
605 HIP_INDEX_VAL(tc_port, 0x2));
607 if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
608 DKL_CMN_UC_DW27_UC_HEALTH, 1))
609 drm_warn(&dev_priv->drm,
610 "Timeout waiting TC uC health\n");
615 icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
616 struct i915_power_well *power_well)
618 enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
619 struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
621 icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
623 hsw_power_well_disable(dev_priv, power_well);
627 * We should only use the power well if we explicitly asked the hardware to
628 * enable it, so check if it's enabled and also check if we've requested it to
631 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
632 struct i915_power_well *power_well)
634 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
635 enum i915_power_well_id id = power_well->desc->id;
636 int pw_idx = power_well->desc->hsw.idx;
637 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
638 HSW_PWR_WELL_CTL_STATE(pw_idx);
641 val = intel_de_read(dev_priv, regs->driver);
644 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
645 * and the MISC_IO PW will be not restored, so check instead for the
646 * BIOS's own request bits, which are forced-on for these power wells
647 * when exiting DC5/6.
649 if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
650 (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
651 val |= intel_de_read(dev_priv, regs->bios);
653 return (val & mask) == mask;
656 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
658 drm_WARN_ONCE(&dev_priv->drm,
659 (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
660 "DC9 already programmed to be enabled.\n");
661 drm_WARN_ONCE(&dev_priv->drm,
662 intel_de_read(dev_priv, DC_STATE_EN) &
663 DC_STATE_EN_UPTO_DC5,
664 "DC5 still not disabled to enable DC9.\n");
665 drm_WARN_ONCE(&dev_priv->drm,
666 intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
667 HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
668 "Power well 2 on.\n");
669 drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
670 "Interrupts not disabled yet.\n");
673 * TODO: check for the following to verify the conditions to enter DC9
674 * state are satisfied:
675 * 1] Check relevant display engine registers to verify if mode set
676 * disable sequence was followed.
677 * 2] Check if display uninitialize sequence is initialized.
681 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
683 drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
684 "Interrupts not disabled yet.\n");
685 drm_WARN_ONCE(&dev_priv->drm,
686 intel_de_read(dev_priv, DC_STATE_EN) &
687 DC_STATE_EN_UPTO_DC5,
688 "DC5 still not disabled.\n");
691 * TODO: check for the following to verify DC9 state was indeed
692 * entered before programming to disable it:
693 * 1] Check relevant display engine registers to verify if mode
694 * set disable sequence was followed.
695 * 2] Check if display uninitialize sequence is initialized.
699 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
706 intel_de_write(dev_priv, DC_STATE_EN, state);
708 /* It has been observed that disabling the dc6 state sometimes
709 * doesn't stick and dmc keeps returning old value. Make sure
710 * the write really sticks enough times and also force rewrite until
711 * we are confident that state is exactly what we want.
714 v = intel_de_read(dev_priv, DC_STATE_EN);
717 intel_de_write(dev_priv, DC_STATE_EN, state);
720 } else if (rereads++ > 5) {
724 } while (rewrites < 100);
727 drm_err(&dev_priv->drm,
728 "Writing dc state to 0x%x failed, now 0x%x\n",
731 /* Most of the times we need one retry, avoid spam */
733 drm_dbg_kms(&dev_priv->drm,
734 "Rewrote dc state to 0x%x %d times\n",
738 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
742 mask = DC_STATE_EN_UPTO_DC5;
744 if (INTEL_GEN(dev_priv) >= 12)
745 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
747 else if (IS_GEN(dev_priv, 11))
748 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
749 else if (IS_GEN9_LP(dev_priv))
750 mask |= DC_STATE_EN_DC9;
752 mask |= DC_STATE_EN_UPTO_DC6;
757 static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
761 val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
763 drm_dbg_kms(&dev_priv->drm,
764 "Resetting DC state tracking from %02x to %02x\n",
765 dev_priv->csr.dc_state, val);
766 dev_priv->csr.dc_state = val;
770 * gen9_set_dc_state - set target display C power state
771 * @dev_priv: i915 device instance
772 * @state: target DC power state
774 * - DC_STATE_EN_UPTO_DC5
775 * - DC_STATE_EN_UPTO_DC6
778 * Signal to DMC firmware/HW the target DC power state passed in @state.
779 * DMC/HW can turn off individual display clocks and power rails when entering
780 * a deeper DC power state (higher in number) and turns these back when exiting
781 * that state to a shallower power state (lower in number). The HW will decide
782 * when to actually enter a given state on an on-demand basis, for instance
783 * depending on the active state of display pipes. The state of display
784 * registers backed by affected power rails are saved/restored as needed.
786 * Based on the above enabling a deeper DC power state is asynchronous wrt.
787 * enabling it. Disabling a deeper power state is synchronous: for instance
788 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
789 * back on and register state is restored. This is guaranteed by the MMIO write
790 * to DC_STATE_EN blocking until the state is restored.
792 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
797 if (drm_WARN_ON_ONCE(&dev_priv->drm,
798 state & ~dev_priv->csr.allowed_dc_mask))
799 state &= dev_priv->csr.allowed_dc_mask;
801 val = intel_de_read(dev_priv, DC_STATE_EN);
802 mask = gen9_dc_mask(dev_priv);
803 drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
806 /* Check if DMC is ignoring our DC state requests */
807 if ((val & mask) != dev_priv->csr.dc_state)
808 drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
809 dev_priv->csr.dc_state, val & mask);
814 gen9_write_dc_state(dev_priv, val);
816 dev_priv->csr.dc_state = val & mask;
820 sanitize_target_dc_state(struct drm_i915_private *dev_priv,
824 DC_STATE_EN_UPTO_DC6,
825 DC_STATE_EN_UPTO_DC5,
831 for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
832 if (target_dc_state != states[i])
835 if (dev_priv->csr.allowed_dc_mask & target_dc_state)
838 target_dc_state = states[i + 1];
841 return target_dc_state;
844 static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
846 drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
847 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
850 static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
854 drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
855 val = intel_de_read(dev_priv, DC_STATE_EN);
856 val &= ~DC_STATE_DC3CO_STATUS;
857 intel_de_write(dev_priv, DC_STATE_EN, val);
858 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
860 * Delay of 200us DC3CO Exit time B.Spec 49196
862 usleep_range(200, 210);
865 static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
867 assert_can_enable_dc9(dev_priv);
869 drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
871 * Power sequencer reset is not needed on
872 * platforms with South Display Engine on PCH,
873 * because PPS registers are always on.
875 if (!HAS_PCH_SPLIT(dev_priv))
876 intel_power_sequencer_reset(dev_priv);
877 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
880 static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
882 assert_can_disable_dc9(dev_priv);
884 drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
886 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
888 intel_pps_unlock_regs_wa(dev_priv);
891 static void assert_csr_loaded(struct drm_i915_private *dev_priv)
893 drm_WARN_ONCE(&dev_priv->drm,
894 !intel_de_read(dev_priv, CSR_PROGRAM(0)),
895 "CSR program storage start is NULL\n");
896 drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_SSP_BASE),
897 "CSR SSP Base Not fine\n");
898 drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_HTP_SKL),
899 "CSR HTP Not fine\n");
902 static struct i915_power_well *
903 lookup_power_well(struct drm_i915_private *dev_priv,
904 enum i915_power_well_id power_well_id)
906 struct i915_power_well *power_well;
908 for_each_power_well(dev_priv, power_well)
909 if (power_well->desc->id == power_well_id)
913 * It's not feasible to add error checking code to the callers since
914 * this condition really shouldn't happen and it doesn't even make sense
915 * to abort things like display initialization sequences. Just return
916 * the first power well and hope the WARN gets reported so we can fix
919 drm_WARN(&dev_priv->drm, 1,
920 "Power well %d not defined for this platform\n",
922 return &dev_priv->power_domains.power_wells[0];
926 * intel_display_power_set_target_dc_state - Set target dc state.
927 * @dev_priv: i915 device
928 * @state: state which needs to be set as target_dc_state.
930 * This function set the "DC off" power well target_dc_state,
931 * based upon this target_dc_stste, "DC off" power well will
932 * enable desired DC state.
934 void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
937 struct i915_power_well *power_well;
939 struct i915_power_domains *power_domains = &dev_priv->power_domains;
941 mutex_lock(&power_domains->lock);
942 power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
944 if (drm_WARN_ON(&dev_priv->drm, !power_well))
947 state = sanitize_target_dc_state(dev_priv, state);
949 if (state == dev_priv->csr.target_dc_state)
952 dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
955 * If DC off power well is disabled, need to enable and disable the
956 * DC off power well to effect target DC state.
959 power_well->desc->ops->enable(dev_priv, power_well);
961 dev_priv->csr.target_dc_state = state;
964 power_well->desc->ops->disable(dev_priv, power_well);
967 mutex_unlock(&power_domains->lock);
970 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
972 enum i915_power_well_id high_pg;
974 /* Power wells at this level and above must be disabled for DC5 entry */
975 if (INTEL_GEN(dev_priv) >= 12)
976 high_pg = ICL_DISP_PW_3;
978 high_pg = SKL_DISP_PW_2;
980 drm_WARN_ONCE(&dev_priv->drm,
981 intel_display_power_well_is_enabled(dev_priv, high_pg),
982 "Power wells above platform's DC5 limit still enabled.\n");
984 drm_WARN_ONCE(&dev_priv->drm,
985 (intel_de_read(dev_priv, DC_STATE_EN) &
986 DC_STATE_EN_UPTO_DC5),
987 "DC5 already programmed to be enabled.\n");
988 assert_rpm_wakelock_held(&dev_priv->runtime_pm);
990 assert_csr_loaded(dev_priv);
993 static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
995 assert_can_enable_dc5(dev_priv);
997 drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
999 /* Wa Display #1183: skl,kbl,cfl */
1000 if (IS_GEN9_BC(dev_priv))
1001 intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
1002 intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
1004 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
1007 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
1009 drm_WARN_ONCE(&dev_priv->drm,
1010 intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
1011 "Backlight is not disabled.\n");
1012 drm_WARN_ONCE(&dev_priv->drm,
1013 (intel_de_read(dev_priv, DC_STATE_EN) &
1014 DC_STATE_EN_UPTO_DC6),
1015 "DC6 already programmed to be enabled.\n");
1017 assert_csr_loaded(dev_priv);
1020 static void skl_enable_dc6(struct drm_i915_private *dev_priv)
1022 assert_can_enable_dc6(dev_priv);
1024 drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
1026 /* Wa Display #1183: skl,kbl,cfl */
1027 if (IS_GEN9_BC(dev_priv))
1028 intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
1029 intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
1031 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1034 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
1035 struct i915_power_well *power_well)
1037 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
1038 int pw_idx = power_well->desc->hsw.idx;
1039 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
1040 u32 bios_req = intel_de_read(dev_priv, regs->bios);
1042 /* Take over the request bit if set by BIOS. */
1043 if (bios_req & mask) {
1044 u32 drv_req = intel_de_read(dev_priv, regs->driver);
1046 if (!(drv_req & mask))
1047 intel_de_write(dev_priv, regs->driver, drv_req | mask);
1048 intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
1052 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1053 struct i915_power_well *power_well)
1055 bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
1058 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1059 struct i915_power_well *power_well)
1061 bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
1064 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
1065 struct i915_power_well *power_well)
1067 return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
1070 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
1072 struct i915_power_well *power_well;
1074 power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
1075 if (power_well->count > 0)
1076 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1078 power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1079 if (power_well->count > 0)
1080 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
1082 if (IS_GEMINILAKE(dev_priv)) {
1083 power_well = lookup_power_well(dev_priv,
1084 GLK_DISP_PW_DPIO_CMN_C);
1085 if (power_well->count > 0)
1086 bxt_ddi_phy_verify_state(dev_priv,
1087 power_well->desc->bxt.phy);
1091 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
1092 struct i915_power_well *power_well)
1094 return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
1095 (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
1098 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
1100 u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
1101 u8 enabled_dbuf_slices = dev_priv->enabled_dbuf_slices_mask;
1103 drm_WARN(&dev_priv->drm,
1104 hw_enabled_dbuf_slices != enabled_dbuf_slices,
1105 "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
1106 hw_enabled_dbuf_slices,
1107 enabled_dbuf_slices);
/*
 * Bring the display out of any DC power-saving state and sanity-check
 * that CDCLK, DBuf and (on GEN9_LP) the DDI PHY power wells still match
 * the driver's software state afterwards.
 */
1110 static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
1112 struct intel_cdclk_config cdclk_config = {};
/* DC3CO has its own dedicated exit path; NOTE(review): an early return
 * after tgl_disable_dc3co() appears elided in this dump — confirm. */
1114 if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
1115 tgl_disable_dc3co(dev_priv);
1119 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/* Cross-check the HW CDCLK config against the cached one. */
1121 dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
1122 /* Can't read out voltage_level so can't use intel_cdclk_changed() */
1123 drm_WARN_ON(&dev_priv->drm,
1124 intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
1127 gen9_assert_dbuf_enabled(dev_priv);
1129 if (IS_GEN9_LP(dev_priv))
1130 bxt_verify_ddi_phy_power_wells(dev_priv);
1132 if (INTEL_GEN(dev_priv) >= 11)
1134 * DMC retains HW context only for port A, the other combo
1135 * PHY's HW context for port B is lost after DC transitions,
1136 * so we need to restore it manually.
1138 intel_combo_phy_init(dev_priv);
/* Enabling the "DC off" well means disabling all display DC states. */
1141 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
1142 struct i915_power_well *power_well)
1144 gen9_disable_dc_states(dev_priv);
/*
 * Disabling the "DC off" well re-enables the DC state selected in
 * csr.target_dc_state. A DC state requires the DMC firmware payload, so
 * bail out when it isn't loaded.
 * NOTE(review): the early return and per-case break statements appear
 * elided in this dump.
 */
1147 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
1148 struct i915_power_well *power_well)
1150 if (!dev_priv->csr.dmc_payload)
1153 switch (dev_priv->csr.target_dc_state) {
1154 case DC_STATE_EN_DC3CO:
1155 tgl_enable_dc3co(dev_priv);
1157 case DC_STATE_EN_UPTO_DC6:
1158 skl_enable_dc6(dev_priv);
1160 case DC_STATE_EN_UPTO_DC5:
1161 gen9_enable_dc5(dev_priv);
/* No HW state to sync for platforms without SW-controlled power wells. */
1166 static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
1167 struct i915_power_well *power_well)
/* Always-on wells cannot be toggled; enable/disable are no-ops. */
1171 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
1172 struct i915_power_well *power_well)
/* Always-on wells report enabled unconditionally (body elided in dump). */
1176 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
1177 struct i915_power_well *power_well)
/*
 * i830 "pipes" power well: turn on pipes A and B, skipping any pipe
 * whose PIPECONF enable bit is already set.
 */
1182 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
1183 struct i915_power_well *power_well)
1185 if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
1186 i830_enable_pipe(dev_priv, PIPE_A);
1187 if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
1188 i830_enable_pipe(dev_priv, PIPE_B);
/* Disable both pipes, B before A (reverse of the enable order). */
1191 static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
1192 struct i915_power_well *power_well)
1194 i830_disable_pipe(dev_priv, PIPE_B);
1195 i830_disable_pipe(dev_priv, PIPE_A);
/* The well counts as enabled only when both pipes A and B are enabled. */
1198 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1199 struct i915_power_well *power_well)
1201 return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1202 intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
/*
 * Force HW to match the SW reference count: enable the pipes if the
 * well is referenced, otherwise disable them.
 */
1205 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
1206 struct i915_power_well *power_well)
1208 if (power_well->count > 0)
1209 i830_pipes_power_well_enable(dev_priv, power_well)
1211 i830_pipes_power_well_disable(dev_priv, power_well);
/*
 * Program a VLV Punit power well on or off and wait (up to 100 ms) for
 * PUNIT_REG_PWRGT_STATUS to reflect the requested state. Timeouts are
 * logged with drm_err(), not propagated. Punit access is bracketed by
 * vlv_punit_get()/put(). NOTE(review): the COND macro definition and
 * early-exit path appear elided in this dump.
 */
1214 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
1215 struct i915_power_well *power_well, bool enable)
1217 int pw_idx = power_well->desc->vlv.idx;
1222 mask = PUNIT_PWRGT_MASK(pw_idx);
1223 state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
1224 PUNIT_PWRGT_PWR_GATE(pw_idx);
1226 vlv_punit_get(dev_priv);
1229 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
1234 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
1237 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
1239 if (wait_for(COND, 100))
1240 drm_err(&dev_priv->drm,
1241 "timeout setting power well state %08x (%08x)\n",
1243 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
1248 vlv_punit_put(dev_priv);
/* Thin wrapper: power the well on via the Punit. */
1251 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
1252 struct i915_power_well *power_well)
1254 vlv_set_power_well(dev_priv, power_well, true);
/* Thin wrapper: power the well off via the Punit. */
1257 static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
1258 struct i915_power_well *power_well)
1260 vlv_set_power_well(dev_priv, power_well, false);
/*
 * Read back a VLV power well's state from the Punit. Warns if the
 * status is neither fully on nor fully gated, or if the control
 * register disagrees with the status (someone else poking the Punit).
 */
1263 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
1264 struct i915_power_well *power_well)
1266 int pw_idx = power_well->desc->vlv.idx;
1267 bool enabled = false;
1272 mask = PUNIT_PWRGT_MASK(pw_idx);
1273 ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
1275 vlv_punit_get(dev_priv);
1277 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
1279 * We only ever set the power-on and power-gate states, anything
1280 * else is unexpected.
1282 drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
1283 state != PUNIT_PWRGT_PWR_GATE(pw_idx));
1288 * A transient state at this point would mean some unexpected party
1289 * is poking at the power controls too.
1291 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
1292 drm_WARN_ON(&dev_priv->drm, ctrl != state);
1294 vlv_punit_put(dev_priv);
/*
 * One-time display clock gating / arbiter setup for VLV-class hardware:
 * sets VRHUNIT clock gating while preserving only DPOUNIT gating,
 * disables trickle feed, clears CBR1 and programs the raw clock
 * frequency register from the runtime-info rawclk value.
 */
1299 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
1304 * On driver load, a pipe may be active and driving a DSI display.
1305 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1306 * (and never recovering) in this case. intel_dsi_post_disable() will
1307 * clear it when we turn off the display.
1309 val = intel_de_read(dev_priv, DSPCLK_GATE_D);
1310 val &= DPOUNIT_CLOCK_GATE_DISABLE;
1311 val |= VRHUNIT_CLOCK_GATE_DISABLE;
1312 intel_de_write(dev_priv, DSPCLK_GATE_D, val);
1315 * Disable trickle feed and enable pnd deadline calculation
1317 intel_de_write(dev_priv, MI_ARB_VLV,
1318 MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1319 intel_de_write(dev_priv, CBR1_VLV, 0);
/* rawclk must be known by now; RAWCLK_FREQ_VLV is derived from it. */
1321 drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
1322 intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
1323 DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
/*
 * Bring up the common display state after the VLV/CHV display power
 * well turns on: CRI/ref clocks on every pipe's DPLL, clock gating,
 * display IRQs, and — except during driver init/resume — HPD, CRT ADPA
 * reset, VGA re-disable and the PPS unlock workaround.
 */
1327 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1329 struct intel_encoder *encoder;
1333 * Enable the CRI clock source so we can get at the
1334 * display and the reference clock for VGA
1335 * hotplug / manual detection. Supposedly DSI also
1336 * needs the ref clock up and running.
1338 * CHV DPLL B/C have some issues if VGA mode is enabled.
1340 for_each_pipe(dev_priv, pipe) {
1341 u32 val = intel_de_read(dev_priv, DPLL(pipe));
1343 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1345 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1347 intel_de_write(dev_priv, DPLL(pipe), val);
1350 vlv_init_display_clock_gating(dev_priv);
1352 spin_lock_irq(&dev_priv->irq_lock);
1353 valleyview_enable_display_irqs(dev_priv);
1354 spin_unlock_irq(&dev_priv->irq_lock);
1357 * During driver initialization/resume we can avoid restoring the
1358 * part of the HW/SW state that will be inited anyway explicitly.
1360 if (dev_priv->power_domains.initializing)
1363 intel_hpd_init(dev_priv);
1365 /* Re-enable the ADPA, if we have one */
1366 for_each_intel_encoder(&dev_priv->drm, encoder) {
1367 if (encoder->type == INTEL_OUTPUT_ANALOG)
1368 intel_crt_reset(&encoder->base);
1371 intel_vga_redisable_power_on(dev_priv);
1373 intel_pps_unlock_regs_wa(dev_priv);
/*
 * Tear down common display state before the display power well goes
 * off: disable and drain display IRQs, reset the power sequencer, and
 * re-arm HPD polling unless we're in late suspend.
 */
1376 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1378 spin_lock_irq(&dev_priv->irq_lock);
1379 valleyview_disable_display_irqs(dev_priv);
1380 spin_unlock_irq(&dev_priv->irq_lock);
1382 /* make sure we're done processing display irqs */
1383 intel_synchronize_irq(dev_priv);
1385 intel_power_sequencer_reset(dev_priv);
1387 /* Prevent us from re-enabling polling on accident in late suspend */
1388 if (!dev_priv->drm.dev->power.is_suspended)
1389 intel_hpd_poll_init(dev_priv);
/* Power the display well on, then (re)initialize dependent state. */
1392 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1393 struct i915_power_well *power_well)
1395 vlv_set_power_well(dev_priv, power_well, true);
1397 vlv_display_power_well_init(dev_priv);
/* Tear down dependent state first, then power the display well off. */
1400 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1401 struct i915_power_well *power_well)
1403 vlv_display_power_well_deinit(dev_priv);
1405 vlv_set_power_well(dev_priv, power_well, false);
/*
 * Enable the VLV DPIO common-lane power well, then de-assert the PHY
 * common reset (DPIO_CMNRST) per the VLV DPIO programming notes. Only
 * safe on init / S3 resume with both PLLs disabled.
 */
1408 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1409 struct i915_power_well *power_well)
1411 /* since ref/cri clock was enabled */
1412 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1414 vlv_set_power_well(dev_priv, power_well, true);
1417 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1418 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
1419 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
1420 * b. The other bits such as sfr settings / modesel may all
1423 * This should only be done on init and resume from S3 with
1424 * both PLLs disabled, or we risk losing DPIO and PLL
1427 intel_de_write(dev_priv, DPIO_CTL,
1428 intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
/*
 * Disable the DPIO common-lane well: with all pipe PLLs asserted
 * disabled, assert the PHY common reset and power the well off.
 */
1431 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1432 struct i915_power_well *power_well)
1436 for_each_pipe(dev_priv, pipe)
1437 assert_pll_disabled(dev_priv, pipe);
1439 /* Assert common reset */
1440 intel_de_write(dev_priv, DPIO_CTL,
1441 intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
1443 vlv_set_power_well(dev_priv, power_well, false);
/* Bitmask covering every defined power domain bit. */
1446 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
/* True iff all bits in @bits are set in @val. */
1448 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
/*
 * Compute the DISPLAY_PHY_STATUS value that should follow from the
 * cached chv_phy_control (plus the enabled state of the CMN_BC / CMN_D
 * wells), then wait up to 10 ms for the HW status to match and log a
 * drm_err() if it never does. Bits for a PHY that hasn't been fully
 * reset since BIOS handoff (chv_phy_assert[] false) are masked out.
 */
1450 static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1452 struct i915_power_well *cmn_bc =
1453 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1454 struct i915_power_well *cmn_d =
1455 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1456 u32 phy_control = dev_priv->chv_phy_control;
1458 u32 phy_status_mask = 0xffffffff;
1461 * The BIOS can leave the PHY is some weird state
1462 * where it doesn't fully power down some parts.
1463 * Disable the asserts until the PHY has been fully
1464 * reset (ie. the power well has been disabled at
1467 if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1468 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1469 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1470 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1471 PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1472 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1473 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
1475 if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1476 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1477 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1478 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
/* PHY0 (ports B/C) expectations, derived from the override bits. */
1480 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
1481 phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1483 /* this assumes override is only used to enable lanes */
1484 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1485 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1487 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1488 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1490 /* CL1 is on whenever anything is on in either channel */
1491 if (BITS_SET(phy_control,
1492 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1493 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1494 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1497 * The DPLLB check accounts for the pipe B + port A usage
1498 * with CL2 powered up but all the lanes in the second channel
1501 if (BITS_SET(phy_control,
1502 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1503 (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1504 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
/* Spline LDOs follow the lane pairs: 0x3 = lanes 0/1, 0xc = lanes 2/3. */
1506 if (BITS_SET(phy_control,
1507 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1508 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1509 if (BITS_SET(phy_control,
1510 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1511 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1513 if (BITS_SET(phy_control,
1514 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1515 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1516 if (BITS_SET(phy_control,
1517 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1518 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
/* PHY1 (port D) has only one channel. */
1521 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
1522 phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1524 /* this assumes override is only used to enable lanes */
1525 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1526 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1528 if (BITS_SET(phy_control,
1529 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1530 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1532 if (BITS_SET(phy_control,
1533 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1534 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1535 if (BITS_SET(phy_control,
1536 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1537 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1540 phy_status &= phy_status_mask;
1543 * The PHY may be busy with some initial calibration and whatnot,
1544 * so the power state can take a while to actually change.
1546 if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
1547 phy_status_mask, phy_status, 10))
1548 drm_err(&dev_priv->drm,
1549 "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1550 intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
1551 phy_status, dev_priv->chv_phy_control);
/*
 * Enable a CHV DPIO common-lane power well (CMN_BC for PHY0, CMN_D for
 * PHY1): power the well on, poll DISPLAY_PHY_STATUS for phypwrgood,
 * enable dynamic power-down in the PHY via sideband, de-assert the
 * common lane reset in chv_phy_control, and cross-check with
 * assert_chv_phy_status().
 */
1556 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1557 struct i915_power_well *power_well)
1563 drm_WARN_ON_ONCE(&dev_priv->drm,
1564 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1565 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
/* NOTE(review): phy/pipe assignment lines for each well id are elided
 * in this dump. */
1567 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1575 /* since ref/cri clock was enabled */
1576 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1577 vlv_set_power_well(dev_priv, power_well, true);
1579 /* Poll for phypwrgood signal */
1580 if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
1581 PHY_POWERGOOD(phy), 1))
1582 drm_err(&dev_priv->drm, "Display PHY %d is not power up\n",
1585 vlv_dpio_get(dev_priv);
1587 /* Enable dynamic power down */
1588 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1589 tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1590 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1591 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
1593 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1594 tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1595 tmp |= DPIO_DYNPWRDOWNEN_CH1;
1596 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1599 * Force the non-existing CL2 off. BXT does this
1600 * too, so maybe it saves some power even though
1601 * CL2 doesn't exist?
1603 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1604 tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1605 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1608 vlv_dpio_put(dev_priv);
1610 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1611 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1612 dev_priv->chv_phy_control);
1614 drm_dbg_kms(&dev_priv->drm,
1615 "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1616 phy, dev_priv->chv_phy_control);
1618 assert_chv_phy_status(dev_priv);
/*
 * Disable a CHV DPIO common-lane well: verify the affected pipe PLLs
 * are off, assert the PHY common lane reset, power the well off, and —
 * now that the PHY is fully reset — re-enable its status asserts.
 */
1621 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1622 struct i915_power_well *power_well)
1626 drm_WARN_ON_ONCE(&dev_priv->drm,
1627 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1628 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
1630 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1632 assert_pll_disabled(dev_priv, PIPE_A);
1633 assert_pll_disabled(dev_priv, PIPE_B);
1636 assert_pll_disabled(dev_priv, PIPE_C);
1639 dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1640 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1641 dev_priv->chv_phy_control);
1643 vlv_set_power_well(dev_priv, power_well, false);
1645 drm_dbg_kms(&dev_priv->drm,
1646 "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1647 phy, dev_priv->chv_phy_control);
1649 /* PHY is fully reset now, so we can enable the PHY state asserts */
1650 dev_priv->chv_phy_assert[phy] = true;
1652 assert_chv_phy_status(dev_priv);
/*
 * Verify via sideband that the PHY lane power-down status matches what
 * the override settings imply: with no override (or all lanes
 * overridden off) expect ALLDL|ANYDL powered down; with a partial mask
 * expect only ANYDL. Skipped until the PHY has been fully reset once.
 */
1655 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1656 enum dpio_channel ch, bool override, unsigned int mask)
1658 enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1659 u32 reg, val, expected, actual;
1662 * The BIOS can leave the PHY is some weird state
1663 * where it doesn't fully power down some parts.
1664 * Disable the asserts until the PHY has been fully
1665 * reset (ie. the power well has been disabled at
1668 if (!dev_priv->chv_phy_assert[phy])
/* Per-channel common lane register: DW0 for CH0, DW6 for CH1. */
1672 reg = _CHV_CMN_DW0_CH0;
1674 reg = _CHV_CMN_DW6_CH1;
1676 vlv_dpio_get(dev_priv);
1677 val = vlv_dpio_read(dev_priv, pipe, reg);
1678 vlv_dpio_put(dev_priv);
1681 * This assumes !override is only used when the port is disabled.
1682 * All lanes should power down even without the override when
1683 * the port is disabled.
1685 if (!override || mask == 0xf) {
1686 expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1688 * If CH1 common lane is not active anymore
1689 * (eg. for pipe B DPLL) the entire channel will
1690 * shut down, which causes the common lane registers
1691 * to read as 0. That means we can't actually check
1692 * the lane power down status bits, but as the entire
1693 * register reads as 0 it's a good indication that the
1694 * channel is indeed entirely powered down.
1696 if (ch == DPIO_CH1 && val == 0)
1698 } else if (mask != 0x0) {
1699 expected = DPIO_ANYDL_POWERDOWN;
/* Extract the two power-down status bits for this channel. */
1705 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1707 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1708 actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1710 drm_WARN(&dev_priv->drm, actual != expected,
1711 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1712 !!(actual & DPIO_ALLDL_POWERDOWN),
1713 !!(actual & DPIO_ANYDL_POWERDOWN),
1714 !!(expected & DPIO_ALLDL_POWERDOWN),
1715 !!(expected & DPIO_ANYDL_POWERDOWN),
/*
 * Set or clear the whole-channel power-down override for a PHY channel
 * under the power_domains lock, write it to DISPLAY_PHY_CONTROL, and
 * return the previous override state so the caller can restore it.
 * No-op (beyond the lookup) when the override is already as requested.
 */
1719 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1720 enum dpio_channel ch, bool override)
1722 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1725 mutex_lock(&power_domains->lock);
1727 was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1729 if (override == was_override)
1733 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1735 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1737 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1738 dev_priv->chv_phy_control);
1740 drm_dbg_kms(&dev_priv->drm,
1741 "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1742 phy, ch, dev_priv->chv_phy_control);
1744 assert_chv_phy_status(dev_priv);
1747 mutex_unlock(&power_domains->lock);
1749 return was_override;
/*
 * Program the per-lane power-down override mask for the encoder's PHY
 * channel (and the override-enable bit) into chv_phy_control and
 * DISPLAY_PHY_CONTROL, then verify both the expected PHY status and the
 * sideband lane power-down state. Serialized by the power_domains lock.
 */
1752 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1753 bool override, unsigned int mask)
1755 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1756 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1757 enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(encoder));
1758 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));
1760 mutex_lock(&power_domains->lock);
/* Replace the old 4-bit lane mask for this channel with the new one. */
1762 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1763 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1766 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1768 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1770 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1771 dev_priv->chv_phy_control);
1773 drm_dbg_kms(&dev_priv->drm,
1774 "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1775 phy, ch, mask, dev_priv->chv_phy_control);
1777 assert_chv_phy_status(dev_priv);
1779 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1781 mutex_unlock(&power_domains->lock);
/*
 * Read the CHV pipe-A power well state from the Punit DSPSSPM register.
 * Warns on a state that is neither fully on nor fully gated, and when
 * the control field (shifted into status position) disagrees with the
 * status — either would mean another agent is driving the controls.
 */
1784 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1785 struct i915_power_well *power_well)
1787 enum pipe pipe = PIPE_A;
1791 vlv_punit_get(dev_priv);
1793 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
1795 * We only ever set the power-on and power-gate states, anything
1796 * else is unexpected.
1798 drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
1799 state != DP_SSS_PWR_GATE(pipe));
1800 enabled = state == DP_SSS_PWR_ON(pipe);
1803 * A transient state at this point would mean some unexpected party
1804 * is poking at the power controls too.
1806 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
1807 drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
1809 vlv_punit_put(dev_priv);
/*
 * Program the CHV pipe-A power well on/off through the Punit DSPSSPM
 * control field and wait (up to 100 ms) for the status field to follow.
 * Timeouts are logged, not returned. NOTE(review): the COND macro and
 * early-exit path appear elided in this dump.
 */
1814 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1815 struct i915_power_well *power_well,
1818 enum pipe pipe = PIPE_A;
1822 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1824 vlv_punit_get(dev_priv);
1827 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
1832 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
1833 ctrl &= ~DP_SSC_MASK(pipe);
1834 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1835 vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
1837 if (wait_for(COND, 100))
1838 drm_err(&dev_priv->drm,
1839 "timeout setting power well state %08x (%08x)\n",
1841 vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
1846 vlv_punit_put(dev_priv);
/* Re-write the cached PHY control value into DISPLAY_PHY_CONTROL. */
1849 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
1850 struct i915_power_well *power_well)
1852 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
1853 dev_priv->chv_phy_control);
/* Power the pipe well on, then bring up the common display state. */
1856 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1857 struct i915_power_well *power_well)
1859 chv_set_pipe_power_well(dev_priv, power_well, true);
1861 vlv_display_power_well_init(dev_priv);
/* Tear down the common display state, then power the pipe well off. */
1864 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1865 struct i915_power_well *power_well)
1867 vlv_display_power_well_deinit(dev_priv);
1869 chv_set_pipe_power_well(dev_priv, power_well, false);
/* Union of both pending async-put domain masks (no disjointness check). */
1872 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
1874 return power_domains->async_put_domains[0] |
1875 power_domains->async_put_domains[1];
1878 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/*
 * Debug build: a domain must never be pending in both async-put masks
 * at once; returns false (after WARNing) if the invariant is broken.
 */
1881 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1883 return !WARN_ON(power_domains->async_put_domains[0] &
1884 power_domains->async_put_domains[1]);
/*
 * Debug build: consistency check of the async-put machinery — masks
 * disjoint, wakeref held iff a mask is pending, and every pending
 * domain holding exactly one reference.
 */
1888 __async_put_domains_state_ok(struct i915_power_domains *power_domains)
1890 enum intel_display_power_domain domain;
1893 err |= !assert_async_put_domain_masks_disjoint(power_domains);
1894 err |= WARN_ON(!!power_domains->async_put_wakeref !=
1895 !!__async_put_domains_mask(power_domains));
1897 for_each_power_domain(domain, __async_put_domains_mask(power_domains))
1898 err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
/*
 * Debug helper: dump the name and use count of every domain in @mask,
 * preceded by @prefix and the popcount of the mask.
 */
1903 static void print_power_domains(struct i915_power_domains *power_domains,
1904 const char *prefix, u64 mask)
1906 struct drm_i915_private *i915 = container_of(power_domains,
1907 struct drm_i915_private,
1909 enum intel_display_power_domain domain;
1911 drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask));
1912 for_each_power_domain(domain, mask)
1913 drm_dbg(&i915->drm, "%s use_count %d\n",
1914 intel_display_power_domain_str(domain),
1915 power_domains->domain_use_count[domain]);
/* Debug helper: dump the async-put wakeref and both pending masks. */
1919 print_async_put_domains_state(struct i915_power_domains *power_domains)
1921 struct drm_i915_private *i915 = container_of(power_domains,
1922 struct drm_i915_private,
1925 drm_dbg(&i915->drm, "async_put_wakeref %u\n",
1926 power_domains->async_put_wakeref);
1928 print_power_domains(power_domains, "async_put_domains[0]",
1929 power_domains->async_put_domains[0]);
1930 print_power_domains(power_domains, "async_put_domains[1]",
1931 power_domains->async_put_domains[1]);
/* Debug build: dump the async-put state whenever the invariants fail. */
1935 verify_async_put_domains_state(struct i915_power_domains *power_domains)
1937 if (!__async_put_domains_state_ok(power_domains))
1938 print_async_put_domains_state(power_domains);
1944 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1949 verify_async_put_domains_state(struct i915_power_domains *power_domains)
1953 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
/* As __async_put_domains_mask(), but asserts the masks are disjoint first. */
1955 static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
1957 assert_async_put_domain_masks_disjoint(power_domains);
1959 return __async_put_domains_mask(power_domains);
/* Remove @domain from both pending async-put masks. */
1963 async_put_domains_clear_domain(struct i915_power_domains *power_domains,
1964 enum intel_display_power_domain domain)
1966 assert_async_put_domain_masks_disjoint(power_domains);
1968 power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
1969 power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
/*
 * If @domain has a pending async put, take over that pending reference
 * instead of acquiring a new one: clear the domain from the masks and,
 * when no pending domains remain, cancel the worker and drop its raw
 * wakeref. Return value (elided in this dump) tells the caller whether
 * the pending ref was grabbed.
 */
1973 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
1974 enum intel_display_power_domain domain)
1976 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1979 if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
1982 async_put_domains_clear_domain(power_domains, domain);
/* Keep the worker and its wakeref while other domains are still pending. */
1986 if (async_put_domains_mask(power_domains))
1989 cancel_delayed_work(&power_domains->async_put_work);
1990 intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
1991 fetch_and_zero(&power_domains->async_put_wakeref));
1993 verify_async_put_domains_state(power_domains);
/*
 * Lock-held core of power_get: reuse a pending async-put reference if
 * one exists, otherwise enable every power well backing @domain, then
 * bump the domain use count. Caller holds power_domains->lock.
 */
1999 __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
2000 enum intel_display_power_domain domain)
2002 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2003 struct i915_power_well *power_well;
2005 if (intel_display_power_grab_async_put_ref(dev_priv, domain))
2008 for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
2009 intel_power_well_get(dev_priv, power_well);
2011 power_domains->domain_use_count[domain]++;
2015 * intel_display_power_get - grab a power domain reference
2016 * @dev_priv: i915 device instance
2017 * @domain: power domain to reference
2019 * This function grabs a power domain reference for @domain and ensures that the
2020 * power domain and all its parents are powered up. Therefore users should only
2021 * grab a reference to the innermost power domain they need.
2023 * Any power domain reference obtained by this function must have a symmetric
2024 * call to intel_display_power_put() to release the reference again.
 *
 * Returns a runtime-pm wakeref that the matching put must release.
2026 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
2027 enum intel_display_power_domain domain)
2029 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2030 intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2032 mutex_lock(&power_domains->lock);
2033 __intel_display_power_get_domain(dev_priv, domain);
2034 mutex_unlock(&power_domains->lock);
2040 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
2041 * @dev_priv: i915 device instance
2042 * @domain: power domain to reference
2044 * This function grabs a power domain reference for @domain and ensures that the
2045 * power domain and all its parents are powered up. Therefore users should only
2046 * grab a reference to the innermost power domain they need.
2048 * Any power domain reference obtained by this function must have a symmetric
2049 * call to intel_display_power_put() to release the reference again.
 *
 * Unlike intel_display_power_get(), this only succeeds when the device
 * is already awake and @domain is already enabled; otherwise the
 * runtime-pm wakeref is dropped again and the call fails.
2052 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
2053 enum intel_display_power_domain domain)
2055 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2056 intel_wakeref_t wakeref;
2059 wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
2063 mutex_lock(&power_domains->lock);
2065 if (__intel_display_power_is_enabled(dev_priv, domain)) {
2066 __intel_display_power_get_domain(dev_priv, domain);
2072 mutex_unlock(&power_domains->lock);
/* Failure path: release the speculative runtime-pm reference. */
2075 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
/*
 * Lock-held core of power_put: warn on a zero use count or on a put
 * racing a pending async put of the same domain, then drop the use
 * count and release the backing wells in reverse order.
 */
2083 __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
2084 enum intel_display_power_domain domain)
2086 struct i915_power_domains *power_domains;
2087 struct i915_power_well *power_well;
2088 const char *name = intel_display_power_domain_str(domain);
2090 power_domains = &dev_priv->power_domains;
2092 drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
2093 "Use count on domain %s is already zero\n",
2095 drm_WARN(&dev_priv->drm,
2096 async_put_domains_mask(power_domains) & BIT_ULL(domain),
2097 "Async disabling of domain %s is pending\n",
2100 power_domains->domain_use_count[domain]--;
2102 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
2103 intel_power_well_put(dev_priv, power_well);
/* Locked wrapper around __intel_display_power_put_domain(). */
2106 static void __intel_display_power_put(struct drm_i915_private *dev_priv,
2107 enum intel_display_power_domain domain)
2109 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2111 mutex_lock(&power_domains->lock);
2112 __intel_display_power_put_domain(dev_priv, domain);
2113 mutex_unlock(&power_domains->lock);
2117 * intel_display_power_put_unchecked - release an unchecked power domain reference
2118 * @dev_priv: i915 device instance
2119 * @domain: power domain to reference
2121 * This function drops the power domain reference obtained by
2122 * intel_display_power_get() and might power down the corresponding hardware
2123 * block right away if this is the last reference.
2125 * This function exists only for historical reasons and should be avoided in
2126 * new code, as the correctness of its use cannot be checked. Always use
2127 * intel_display_power_put() instead.
2129 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
2130 enum intel_display_power_domain domain)
2132 __intel_display_power_put(dev_priv, domain);
2133 intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
/*
 * Hand @wakeref to the async-put machinery and queue the delayed
 * worker (100 ms) that will release the pending domains. Warns if a
 * wakeref is already stashed or the work was already queued.
 */
2137 queue_async_put_domains_work(struct i915_power_domains *power_domains,
2138 intel_wakeref_t wakeref)
2140 WARN_ON(power_domains->async_put_wakeref);
2141 power_domains->async_put_wakeref = wakeref;
2142 WARN_ON(!queue_delayed_work(system_unbound_wq,
2143 &power_domains->async_put_work,
2144 msecs_to_jiffies(100)));
/*
 * Drop the pending reference of every domain in @mask. The caller
 * holds a raw wakeref; temporarily upgrade it to a full runtime-pm
 * wakeref so the power-well accesses satisfy the rpm state checker.
 */
2148 release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
2150 struct drm_i915_private *dev_priv =
2151 container_of(power_domains, struct drm_i915_private,
2153 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2154 enum intel_display_power_domain domain;
2155 intel_wakeref_t wakeref;
2158 * The caller must hold already raw wakeref, upgrade that to a proper
2159 * wakeref to make the state checker happy about the HW access during
2160 * power well disabling.
2162 assert_rpm_raw_wakeref_held(rpm);
2163 wakeref = intel_runtime_pm_get(rpm);
2165 for_each_power_domain(domain, mask) {
2166 /* Clear before put, so put's sanity check is happy. */
2167 async_put_domains_clear_domain(power_domains, domain);
2168 __intel_display_power_put_domain(dev_priv, domain);
2171 intel_runtime_pm_put(rpm, wakeref);
/*
 * Delayed worker for async power-domain puts: releases the domains
 * batched in async_put_domains[0], then promotes [1] to [0] and
 * requeues itself (handing over the fresh raw wakeref) if more puts
 * arrived meanwhile. Bails out early if a get/flush already consumed
 * the stashed wakeref.
 */
2175 intel_display_power_put_async_work(struct work_struct *work)
2177 struct drm_i915_private *dev_priv =
2178 container_of(work, struct drm_i915_private,
2179 power_domains.async_put_work.work);
2180 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2181 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
2182 intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
2183 intel_wakeref_t old_work_wakeref = 0;
2185 mutex_lock(&power_domains->lock);
2188 * Bail out if all the domain refs pending to be released were grabbed
2189 * by subsequent gets or a flush_work.
2191 old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2192 if (!old_work_wakeref)
2195 release_async_put_domains(power_domains,
2196 power_domains->async_put_domains[0]);
2198 /* Requeue the work if more domains were async put meanwhile. */
2199 if (power_domains->async_put_domains[1]) {
2200 power_domains->async_put_domains[0] =
2201 fetch_and_zero(&power_domains->async_put_domains[1]);
2202 queue_async_put_domains_work(power_domains,
2203 fetch_and_zero(&new_work_wakeref));
2207 verify_async_put_domains_state(power_domains);
2209 mutex_unlock(&power_domains->lock);
/* Release whichever wakerefs were not handed over to a requeue. */
2211 if (old_work_wakeref)
2212 intel_runtime_pm_put_raw(rpm, old_work_wakeref);
2213 if (new_work_wakeref)
2214 intel_runtime_pm_put_raw(rpm, new_work_wakeref);
2218 * intel_display_power_put_async - release a power domain reference asynchronously
2219 * @i915: i915 device instance
2220 * @domain: power domain to reference
2221 * @wakeref: wakeref acquired for the reference that is being released
2223 * This function drops the power domain reference obtained by
2224 * intel_display_power_get*() and schedules a work to power down the
2225 * corresponding hardware block if this is the last reference.
2227 void __intel_display_power_put_async(struct drm_i915_private *i915,
2228 enum intel_display_power_domain domain,
2229 intel_wakeref_t wakeref)
2231 struct i915_power_domains *power_domains = &i915->power_domains;
2232 struct intel_runtime_pm *rpm = &i915->runtime_pm;
2233 intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
2235 mutex_lock(&power_domains->lock);
/* Not the last reference: drop it synchronously, no work needed. */
2237 if (power_domains->domain_use_count[domain] > 1) {
2238 __intel_display_power_put_domain(i915, domain);
2243 drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
2245 /* Let a pending work requeue itself or queue a new one. */
2246 if (power_domains->async_put_wakeref) {
2247 power_domains->async_put_domains[1] |= BIT_ULL(domain);
2249 power_domains->async_put_domains[0] |= BIT_ULL(domain);
2250 queue_async_put_domains_work(power_domains,
2251 fetch_and_zero(&work_wakeref));
2255 verify_async_put_domains_state(power_domains);
2257 mutex_unlock(&power_domains->lock);
/* Drop the temp raw wakeref (if not handed to the work) and @wakeref. */
2260 intel_runtime_pm_put_raw(rpm, work_wakeref);
2262 intel_runtime_pm_put(rpm, wakeref);
2266 * intel_display_power_flush_work - flushes the async display power disabling work
2267 * @i915: i915 device instance
2269 * Flushes any pending work that was scheduled by a preceding
2270 * intel_display_power_put_async() call, completing the disabling of the
2271 * corresponding power domains.
2273 * Note that the work handler function may still be running after this
2274 * function returns; to ensure that the work handler isn't running use
2275 * intel_display_power_flush_work_sync() instead.
2277 void intel_display_power_flush_work(struct drm_i915_private *i915)
2279 struct i915_power_domains *power_domains = &i915->power_domains;
2280 intel_wakeref_t work_wakeref;
2282 mutex_lock(&power_domains->lock);
/* Steal the pending work's wakeref; zero means no work is pending. */
2284 work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
/* Release all queued domains here instead of in the work handler. */
2288 release_async_put_domains(power_domains,
2289 async_put_domains_mask(power_domains));
2290 cancel_delayed_work(&power_domains->async_put_work);
2293 verify_async_put_domains_state(power_domains);
2295 mutex_unlock(&power_domains->lock);
2298 intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
2302 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
2303 * @i915: i915 device instance
2305 * Like intel_display_power_flush_work(), but also ensure that the work
2306 * handler function is not running any more when this function returns.
2309 intel_display_power_flush_work_sync(struct drm_i915_private *i915)
2311 struct i915_power_domains *power_domains = &i915->power_domains;
2313 intel_display_power_flush_work(i915);
/* Unlike the non-sync variant, wait for a running handler to finish. */
2314 cancel_delayed_work_sync(&power_domains->async_put_work);
2316 verify_async_put_domains_state(power_domains);
2318 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
/*
 * Wakeref-tracking variant of intel_display_power_put(), only built when
 * runtime-PM debugging is enabled.
 */
2321 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2323 * intel_display_power_put - release a power domain reference
2324 * @dev_priv: i915 device instance
2325 * @domain: power domain to reference
2326 * @wakeref: wakeref acquired for the reference that is being released
2328 * This function drops the power domain reference obtained by
2329 * intel_display_power_get() and might power down the corresponding hardware
2330 * block right away if this is the last reference.
2332 void intel_display_power_put(struct drm_i915_private *dev_priv,
2333 enum intel_display_power_domain domain,
2334 intel_wakeref_t wakeref)
2336 __intel_display_power_put(dev_priv, domain);
2337 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
/* Domains powered by the single i830 pipes power well (pipes A/B). */
2341 #define I830_PIPES_POWER_DOMAINS ( \
2342 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
2343 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2344 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2345 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2346 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2347 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2348 BIT_ULL(POWER_DOMAIN_INIT))
/* Valleyview: domain masks for the display, DPIO common and TX lane wells. */
2350 #define VLV_DISPLAY_POWER_DOMAINS ( \
2351 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
2352 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
2353 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2354 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2355 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2356 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2357 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2358 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2359 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2360 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
2361 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
2362 BIT_ULL(POWER_DOMAIN_VGA) | \
2363 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2364 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2365 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2366 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2367 BIT_ULL(POWER_DOMAIN_INIT))
2369 #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
2370 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2371 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2372 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
2373 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2374 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2375 BIT_ULL(POWER_DOMAIN_INIT))
/* Per-lane-pair DPIO TX wells; port B and C lanes map onto separate wells. */
2377 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
2378 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2379 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2380 BIT_ULL(POWER_DOMAIN_INIT))
2382 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
2383 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2384 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2385 BIT_ULL(POWER_DOMAIN_INIT))
2387 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
2388 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2389 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2390 BIT_ULL(POWER_DOMAIN_INIT))
2392 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
2393 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2394 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2395 BIT_ULL(POWER_DOMAIN_INIT))
/* Cherryview: adds pipe C and DDI D relative to Valleyview. */
2397 #define CHV_DISPLAY_POWER_DOMAINS ( \
2398 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
2399 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
2400 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2401 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2402 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2403 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2404 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2405 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2406 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2407 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2408 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2409 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2410 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2411 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
2412 BIT_ULL(POWER_DOMAIN_VGA) | \
2413 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2414 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2415 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2416 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2417 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2418 BIT_ULL(POWER_DOMAIN_INIT))
2420 #define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
2421 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2422 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2423 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2424 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2425 BIT_ULL(POWER_DOMAIN_INIT))
2427 #define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
2428 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2429 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2430 BIT_ULL(POWER_DOMAIN_INIT))
/* Haswell: single "global" display well covering everything but pipe A/eDP. */
2432 #define HSW_DISPLAY_POWER_DOMAINS ( \
2433 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2434 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2435 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2436 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2437 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2438 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2439 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2440 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2441 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2442 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2443 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2444 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
2445 BIT_ULL(POWER_DOMAIN_VGA) | \
2446 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2447 BIT_ULL(POWER_DOMAIN_INIT))
/* Broadwell: like HSW but without the pipe A panel fitter in this well. */
2449 #define BDW_DISPLAY_POWER_DOMAINS ( \
2450 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2451 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2452 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2453 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2454 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2455 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2456 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2457 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2458 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2459 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2460 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
2461 BIT_ULL(POWER_DOMAIN_VGA) | \
2462 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2463 BIT_ULL(POWER_DOMAIN_INIT))
/* Skylake: PW2, per-DDI IO wells and the DC-off (DC-state blocking) mask. */
2465 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2466 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2467 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2468 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2469 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2470 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2471 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2472 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2473 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2474 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2475 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2476 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
2477 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2478 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2479 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2480 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2481 BIT_ULL(POWER_DOMAIN_VGA) | \
2482 BIT_ULL(POWER_DOMAIN_INIT))
2483 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
2484 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
2485 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
2486 BIT_ULL(POWER_DOMAIN_INIT))
2487 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
2488 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
2489 BIT_ULL(POWER_DOMAIN_INIT))
2490 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
2491 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
2492 BIT_ULL(POWER_DOMAIN_INIT))
2493 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
2494 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
2495 BIT_ULL(POWER_DOMAIN_INIT))
2496 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2497 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
2498 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
2499 BIT_ULL(POWER_DOMAIN_MODESET) | \
2500 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2501 BIT_ULL(POWER_DOMAIN_INIT))
/* Broxton: PW2, DC-off and the two DPIO common (PHY) wells. */
2503 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2504 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2505 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2506 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2507 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2508 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2509 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2510 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2511 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2512 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2513 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2514 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2515 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2516 BIT_ULL(POWER_DOMAIN_VGA) | \
2517 BIT_ULL(POWER_DOMAIN_INIT))
2518 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2519 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
2520 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
2521 BIT_ULL(POWER_DOMAIN_MODESET) | \
2522 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2523 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2524 BIT_ULL(POWER_DOMAIN_INIT))
2525 #define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
2526 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
2527 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2528 BIT_ULL(POWER_DOMAIN_INIT))
2529 #define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
2530 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2531 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2532 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2533 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2534 BIT_ULL(POWER_DOMAIN_INIT))
/* Geminilake: PW2, per-DDI IO/DPIO/AUX wells and the DC-off mask. */
2536 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2537 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2538 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2539 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2540 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2541 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2542 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2543 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2544 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2545 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2546 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2547 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2548 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2549 BIT_ULL(POWER_DOMAIN_VGA) | \
2550 BIT_ULL(POWER_DOMAIN_INIT))
2551 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \
2552 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2553 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
2554 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2555 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
2556 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2557 #define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
2558 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
2559 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2560 BIT_ULL(POWER_DOMAIN_INIT))
2561 #define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
2562 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2563 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2564 BIT_ULL(POWER_DOMAIN_INIT))
2565 #define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
2566 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2567 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2568 BIT_ULL(POWER_DOMAIN_INIT))
2569 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
2570 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2571 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
2572 BIT_ULL(POWER_DOMAIN_INIT))
2573 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
2574 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2575 BIT_ULL(POWER_DOMAIN_INIT))
2576 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
2577 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2578 BIT_ULL(POWER_DOMAIN_INIT))
2579 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2580 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
2581 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
2582 BIT_ULL(POWER_DOMAIN_MODESET) | \
2583 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2584 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2585 BIT_ULL(POWER_DOMAIN_INIT))
/* Cannonlake: PW2, per-DDI IO wells, per-port AUX wells and DC-off mask. */
2587 #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
2588 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2589 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2590 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2591 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2592 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2593 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2594 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2595 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2596 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2597 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2598 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
2599 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2600 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2601 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2602 BIT_ULL(POWER_DOMAIN_AUX_F) | \
2603 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2604 BIT_ULL(POWER_DOMAIN_VGA) | \
2605 BIT_ULL(POWER_DOMAIN_INIT))
2606 #define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \
2607 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
2608 BIT_ULL(POWER_DOMAIN_INIT))
2609 #define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \
2610 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
2611 BIT_ULL(POWER_DOMAIN_INIT))
2612 #define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \
2613 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
2614 BIT_ULL(POWER_DOMAIN_INIT))
2615 #define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \
2616 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
2617 BIT_ULL(POWER_DOMAIN_INIT))
2618 #define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \
2619 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2620 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
2621 BIT_ULL(POWER_DOMAIN_INIT))
2622 #define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \
2623 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2624 BIT_ULL(POWER_DOMAIN_INIT))
2625 #define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \
2626 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2627 BIT_ULL(POWER_DOMAIN_INIT))
2628 #define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \
2629 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2630 BIT_ULL(POWER_DOMAIN_INIT))
2631 #define CNL_DISPLAY_AUX_F_POWER_DOMAINS ( \
2632 BIT_ULL(POWER_DOMAIN_AUX_F) | \
2633 BIT_ULL(POWER_DOMAIN_INIT))
2634 #define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS ( \
2635 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \
2636 BIT_ULL(POWER_DOMAIN_INIT))
2637 #define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2638 CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
2639 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
2640 BIT_ULL(POWER_DOMAIN_MODESET) | \
2641 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2642 BIT_ULL(POWER_DOMAIN_INIT))
/* Icelake power well / power gate domain layout (nested: PW4 < PW3 < PW2). */
2645 * ICL PW_0/PG_0 domains (HW/DMC control):
2647 * - clocks except port PLL
2648 * - central power except FBC
2649 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2650 * ICL PW_1/PG_1 domains (HW/DMC control):
2652 * - PIPE_A and its planes, except VGA
2653 * - transcoder EDP + PSR
2658 #define ICL_PW_4_POWER_DOMAINS ( \
2659 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2660 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2661 BIT_ULL(POWER_DOMAIN_INIT))
2663 #define ICL_PW_3_POWER_DOMAINS ( \
2664 ICL_PW_4_POWER_DOMAINS | \
2665 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2666 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2667 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2668 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2669 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2670 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2671 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2672 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2673 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
2674 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
2675 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2676 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2677 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2678 BIT_ULL(POWER_DOMAIN_AUX_E) | \
2679 BIT_ULL(POWER_DOMAIN_AUX_F) | \
2680 BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \
2681 BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
2682 BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
2683 BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
2684 BIT_ULL(POWER_DOMAIN_VGA) | \
2685 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2686 BIT_ULL(POWER_DOMAIN_INIT))
2689 * - KVMR (HW control)
2691 #define ICL_PW_2_POWER_DOMAINS ( \
2692 ICL_PW_3_POWER_DOMAINS | \
2693 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
2694 BIT_ULL(POWER_DOMAIN_INIT))
2696 * - KVMR (HW control)
2698 #define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2699 ICL_PW_2_POWER_DOMAINS | \
2700 BIT_ULL(POWER_DOMAIN_MODESET) | \
2701 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2702 BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \
2703 BIT_ULL(POWER_DOMAIN_INIT))
/* Per-DDI IO wells (A-F). */
2705 #define ICL_DDI_IO_A_POWER_DOMAINS ( \
2706 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
2707 #define ICL_DDI_IO_B_POWER_DOMAINS ( \
2708 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
2709 #define ICL_DDI_IO_C_POWER_DOMAINS ( \
2710 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
2711 #define ICL_DDI_IO_D_POWER_DOMAINS ( \
2712 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2713 #define ICL_DDI_IO_E_POWER_DOMAINS ( \
2714 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2715 #define ICL_DDI_IO_F_POWER_DOMAINS ( \
2716 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
/* Per-port AUX wells: combo PHY (A/B), Type-C (TC1-4) and Thunderbolt. */
2718 #define ICL_AUX_A_IO_POWER_DOMAINS ( \
2719 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
2720 BIT_ULL(POWER_DOMAIN_AUX_A))
2721 #define ICL_AUX_B_IO_POWER_DOMAINS ( \
2722 BIT_ULL(POWER_DOMAIN_AUX_B))
2723 #define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \
2724 BIT_ULL(POWER_DOMAIN_AUX_C))
2725 #define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \
2726 BIT_ULL(POWER_DOMAIN_AUX_D))
2727 #define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \
2728 BIT_ULL(POWER_DOMAIN_AUX_E))
2729 #define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \
2730 BIT_ULL(POWER_DOMAIN_AUX_F))
2731 #define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \
2732 BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
2733 #define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \
2734 BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2735 #define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \
2736 BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2737 #define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \
2738 BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
/* Tigerlake: nested PW5 < PW4 < PW3 < PW2 wells, plus DDI IO and AUX wells. */
2740 #define TGL_PW_5_POWER_DOMAINS ( \
2741 BIT_ULL(POWER_DOMAIN_PIPE_D) | \
2742 BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \
2743 BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \
2744 BIT_ULL(POWER_DOMAIN_INIT))
2746 #define TGL_PW_4_POWER_DOMAINS ( \
2747 TGL_PW_5_POWER_DOMAINS | \
2748 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2749 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2750 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2751 BIT_ULL(POWER_DOMAIN_INIT))
2753 #define TGL_PW_3_POWER_DOMAINS ( \
2754 TGL_PW_4_POWER_DOMAINS | \
2755 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2756 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2757 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2758 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2759 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
2760 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
2761 BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) | \
2762 BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) | \
2763 BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) | \
2764 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2765 BIT_ULL(POWER_DOMAIN_AUX_E) | \
2766 BIT_ULL(POWER_DOMAIN_AUX_F) | \
2767 BIT_ULL(POWER_DOMAIN_AUX_G) | \
2768 BIT_ULL(POWER_DOMAIN_AUX_H) | \
2769 BIT_ULL(POWER_DOMAIN_AUX_I) | \
2770 BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
2771 BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
2772 BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
2773 BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \
2774 BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \
2775 BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \
2776 BIT_ULL(POWER_DOMAIN_VGA) | \
2777 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2778 BIT_ULL(POWER_DOMAIN_INIT))
2780 #define TGL_PW_2_POWER_DOMAINS ( \
2781 TGL_PW_3_POWER_DOMAINS | \
2782 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
2783 BIT_ULL(POWER_DOMAIN_INIT))
2785 #define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
2786 TGL_PW_3_POWER_DOMAINS | \
2787 BIT_ULL(POWER_DOMAIN_MODESET) | \
2788 BIT_ULL(POWER_DOMAIN_AUX_A) | \
2789 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2790 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2791 BIT_ULL(POWER_DOMAIN_INIT))
/* Per-DDI IO wells; D-I map to Type-C ports TC1-TC6. */
2793 #define TGL_DDI_IO_D_TC1_POWER_DOMAINS ( \
2794 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
2795 #define TGL_DDI_IO_E_TC2_POWER_DOMAINS ( \
2796 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
2797 #define TGL_DDI_IO_F_TC3_POWER_DOMAINS ( \
2798 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
2799 #define TGL_DDI_IO_G_TC4_POWER_DOMAINS ( \
2800 BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
2801 #define TGL_DDI_IO_H_TC5_POWER_DOMAINS ( \
2802 BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
2803 #define TGL_DDI_IO_I_TC6_POWER_DOMAINS ( \
2804 BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))
/* Per-port AUX wells: combo PHY, Type-C and Thunderbolt variants. */
2806 #define TGL_AUX_A_IO_POWER_DOMAINS ( \
2807 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
2808 BIT_ULL(POWER_DOMAIN_AUX_A))
2809 #define TGL_AUX_B_IO_POWER_DOMAINS ( \
2810 BIT_ULL(POWER_DOMAIN_AUX_B))
2811 #define TGL_AUX_C_IO_POWER_DOMAINS ( \
2812 BIT_ULL(POWER_DOMAIN_AUX_C))
2813 #define TGL_AUX_D_TC1_IO_POWER_DOMAINS ( \
2814 BIT_ULL(POWER_DOMAIN_AUX_D))
2815 #define TGL_AUX_E_TC2_IO_POWER_DOMAINS ( \
2816 BIT_ULL(POWER_DOMAIN_AUX_E))
2817 #define TGL_AUX_F_TC3_IO_POWER_DOMAINS ( \
2818 BIT_ULL(POWER_DOMAIN_AUX_F))
2819 #define TGL_AUX_G_TC4_IO_POWER_DOMAINS ( \
2820 BIT_ULL(POWER_DOMAIN_AUX_G))
2821 #define TGL_AUX_H_TC5_IO_POWER_DOMAINS ( \
2822 BIT_ULL(POWER_DOMAIN_AUX_H))
2823 #define TGL_AUX_I_TC6_IO_POWER_DOMAINS ( \
2824 BIT_ULL(POWER_DOMAIN_AUX_I))
2825 #define TGL_AUX_D_TBT1_IO_POWER_DOMAINS ( \
2826 BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
2827 #define TGL_AUX_E_TBT2_IO_POWER_DOMAINS ( \
2828 BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
2829 #define TGL_AUX_F_TBT3_IO_POWER_DOMAINS ( \
2830 BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
2831 #define TGL_AUX_G_TBT4_IO_POWER_DOMAINS ( \
2832 BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
2833 #define TGL_AUX_H_TBT5_IO_POWER_DOMAINS ( \
2834 BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
2835 #define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \
2836 BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
/* Ops for wells that are always on: all callbacks are no-ops / constant true. */
2838 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
2839 .sync_hw = i9xx_power_well_sync_hw_noop,
2840 .enable = i9xx_always_on_power_well_noop,
2841 .disable = i9xx_always_on_power_well_noop,
2842 .is_enabled = i9xx_always_on_power_well_enabled,
/* CHV pipe power well ops. */
2845 static const struct i915_power_well_ops chv_pipe_power_well_ops = {
2846 .sync_hw = chv_pipe_power_well_sync_hw,
2847 .enable = chv_pipe_power_well_enable,
2848 .disable = chv_pipe_power_well_disable,
2849 .is_enabled = chv_pipe_power_well_enabled,
/* CHV DPIO common (PHY) well ops; status is read via the VLV/Punit path. */
2852 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
2853 .sync_hw = i9xx_power_well_sync_hw_noop,
2854 .enable = chv_dpio_cmn_power_well_enable,
2855 .disable = chv_dpio_cmn_power_well_disable,
2856 .is_enabled = vlv_power_well_enabled,
/* Platforms with no controllable display power wells: one always-on well. */
2859 static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
2861 .name = "always-on",
2863 .domains = POWER_DOMAIN_MASK,
2864 .ops = &i9xx_always_on_power_well_ops,
2865 .id = DISP_PW_ID_NONE,
2869 static const struct i915_power_well_ops i830_pipes_power_well_ops = {
2870 .sync_hw = i830_pipes_power_well_sync_hw,
2871 .enable = i830_pipes_power_well_enable,
2872 .disable = i830_pipes_power_well_disable,
2873 .is_enabled = i830_pipes_power_well_enabled,
/* i830: always-on well plus the single pipes well. */
2876 static const struct i915_power_well_desc i830_power_wells[] = {
2878 .name = "always-on",
2880 .domains = POWER_DOMAIN_MASK,
2881 .ops = &i9xx_always_on_power_well_ops,
2882 .id = DISP_PW_ID_NONE,
2886 .domains = I830_PIPES_POWER_DOMAINS,
2887 .ops = &i830_pipes_power_well_ops,
2888 .id = DISP_PW_ID_NONE,
/* Generic HSW+ power well ops, reused by all later request/status platforms. */
2892 static const struct i915_power_well_ops hsw_power_well_ops = {
2893 .sync_hw = hsw_power_well_sync_hw,
2894 .enable = hsw_power_well_enable,
2895 .disable = hsw_power_well_disable,
2896 .is_enabled = hsw_power_well_enabled,
/* Virtual "DC off" well: enabling it blocks DC states. */
2899 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
2900 .sync_hw = i9xx_power_well_sync_hw_noop,
2901 .enable = gen9_dc_off_power_well_enable,
2902 .disable = gen9_dc_off_power_well_disable,
2903 .is_enabled = gen9_dc_off_power_well_enabled,
2906 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
2907 .sync_hw = i9xx_power_well_sync_hw_noop,
2908 .enable = bxt_dpio_cmn_power_well_enable,
2909 .disable = bxt_dpio_cmn_power_well_disable,
2910 .is_enabled = bxt_dpio_cmn_power_well_enabled,
/* The four HSW+ power well control registers (BIOS/driver/KVMR/debug). */
2913 static const struct i915_power_well_regs hsw_power_well_regs = {
2914 .bios = HSW_PWR_WELL_CTL1,
2915 .driver = HSW_PWR_WELL_CTL2,
2916 .kvmr = HSW_PWR_WELL_CTL3,
2917 .debug = HSW_PWR_WELL_CTL4,
/* Haswell: always-on well plus one global display well. */
2920 static const struct i915_power_well_desc hsw_power_wells[] = {
2922 .name = "always-on",
2924 .domains = POWER_DOMAIN_MASK,
2925 .ops = &i9xx_always_on_power_well_ops,
2926 .id = DISP_PW_ID_NONE,
2930 .domains = HSW_DISPLAY_POWER_DOMAINS,
2931 .ops = &hsw_power_well_ops,
2932 .id = HSW_DISP_PW_GLOBAL,
2934 .hsw.regs = &hsw_power_well_regs,
2935 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2936 .hsw.has_vga = true,
/* Broadwell: like HSW, but the global well also handles pipe B/C IRQs. */
2941 static const struct i915_power_well_desc bdw_power_wells[] = {
2943 .name = "always-on",
2945 .domains = POWER_DOMAIN_MASK,
2946 .ops = &i9xx_always_on_power_well_ops,
2947 .id = DISP_PW_ID_NONE,
2951 .domains = BDW_DISPLAY_POWER_DOMAINS,
2952 .ops = &hsw_power_well_ops,
2953 .id = HSW_DISP_PW_GLOBAL,
2955 .hsw.regs = &hsw_power_well_regs,
2956 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2957 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2958 .hsw.has_vga = true,
/* VLV well ops; all variants query status through vlv_power_well_enabled. */
2963 static const struct i915_power_well_ops vlv_display_power_well_ops = {
2964 .sync_hw = i9xx_power_well_sync_hw_noop,
2965 .enable = vlv_display_power_well_enable,
2966 .disable = vlv_display_power_well_disable,
2967 .is_enabled = vlv_power_well_enabled,
2970 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
2971 .sync_hw = i9xx_power_well_sync_hw_noop,
2972 .enable = vlv_dpio_cmn_power_well_enable,
2973 .disable = vlv_dpio_cmn_power_well_disable,
2974 .is_enabled = vlv_power_well_enabled,
2977 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
2978 .sync_hw = i9xx_power_well_sync_hw_noop,
2979 .enable = vlv_power_well_enable,
2980 .disable = vlv_power_well_disable,
2981 .is_enabled = vlv_power_well_enabled,
/*
 * Valleyview well list. Note every dpio-tx well lists the full union of the
 * B/C lane domains: the TX wells must be toggled together.
 */
2984 static const struct i915_power_well_desc vlv_power_wells[] = {
2986 .name = "always-on",
2988 .domains = POWER_DOMAIN_MASK,
2989 .ops = &i9xx_always_on_power_well_ops,
2990 .id = DISP_PW_ID_NONE,
2994 .domains = VLV_DISPLAY_POWER_DOMAINS,
2995 .ops = &vlv_display_power_well_ops,
2996 .id = VLV_DISP_PW_DISP2D,
2998 .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
3002 .name = "dpio-tx-b-01",
3003 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3004 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3005 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3006 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3007 .ops = &vlv_dpio_power_well_ops,
3008 .id = DISP_PW_ID_NONE,
3010 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
3014 .name = "dpio-tx-b-23",
3015 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3016 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3017 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3018 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3019 .ops = &vlv_dpio_power_well_ops,
3020 .id = DISP_PW_ID_NONE,
3022 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
3026 .name = "dpio-tx-c-01",
3027 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3028 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3029 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3030 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3031 .ops = &vlv_dpio_power_well_ops,
3032 .id = DISP_PW_ID_NONE,
3034 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
3038 .name = "dpio-tx-c-23",
3039 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
3040 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
3041 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
3042 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
3043 .ops = &vlv_dpio_power_well_ops,
3044 .id = DISP_PW_ID_NONE,
3046 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
3050 .name = "dpio-common",
3051 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
3052 .ops = &vlv_dpio_cmn_power_well_ops,
3053 .id = VLV_DISP_PW_DPIO_CMN_BC,
3055 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
/* Cherryview well list: display well plus two DPIO common (PHY) wells. */
3060 static const struct i915_power_well_desc chv_power_wells[] = {
3062 .name = "always-on",
3064 .domains = POWER_DOMAIN_MASK,
3065 .ops = &i9xx_always_on_power_well_ops,
3066 .id = DISP_PW_ID_NONE,
3071 * Pipe A power well is the new disp2d well. Pipe B and C
3072 * power wells don't actually exist. Pipe A power well is
3073 * required for any pipe to work.
3075 .domains = CHV_DISPLAY_POWER_DOMAINS,
3076 .ops = &chv_pipe_power_well_ops,
3077 .id = DISP_PW_ID_NONE,
3080 .name = "dpio-common-bc",
3081 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
3082 .ops = &chv_dpio_cmn_power_well_ops,
3083 .id = VLV_DISP_PW_DPIO_CMN_BC,
3085 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
3089 .name = "dpio-common-d",
3090 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
3091 .ops = &chv_dpio_cmn_power_well_ops,
3092 .id = CHV_DISP_PW_DPIO_CMN_D,
3094 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
/*
 * Look up the power well with the given ID and query its current enabled
 * state via the well's is_enabled() callback.
 * NOTE(review): the power_domains lock acquisition appears elided in this
 * extract — confirm callers' locking expectations against the full file.
 */
3099 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
3100 enum i915_power_well_id power_well_id)
3102 struct i915_power_well *power_well;
3105 power_well = lookup_power_well(dev_priv, power_well_id);
3106 ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
/* Skylake well list: PW1/MISC_IO (DMC-managed), DC-off, PW2 and DDI IO wells. */
3111 static const struct i915_power_well_desc skl_power_wells[] = {
3113 .name = "always-on",
3115 .domains = POWER_DOMAIN_MASK,
3116 .ops = &i9xx_always_on_power_well_ops,
3117 .id = DISP_PW_ID_NONE,
3120 .name = "power well 1",
3121 /* Handled by the DMC firmware */
3124 .ops = &hsw_power_well_ops,
3125 .id = SKL_DISP_PW_1,
3127 .hsw.regs = &hsw_power_well_regs,
3128 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3129 .hsw.has_fuses = true,
3133 .name = "MISC IO power well",
3134 /* Handled by the DMC firmware */
3137 .ops = &hsw_power_well_ops,
3138 .id = SKL_DISP_PW_MISC_IO,
3140 .hsw.regs = &hsw_power_well_regs,
3141 .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
3146 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
3147 .ops = &gen9_dc_off_power_well_ops,
3148 .id = SKL_DISP_DC_OFF,
3151 .name = "power well 2",
3152 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3153 .ops = &hsw_power_well_ops,
3154 .id = SKL_DISP_PW_2,
3156 .hsw.regs = &hsw_power_well_regs,
3157 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3158 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3159 .hsw.has_vga = true,
3160 .hsw.has_fuses = true,
3164 .name = "DDI A/E IO power well",
3165 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
3166 .ops = &hsw_power_well_ops,
3167 .id = DISP_PW_ID_NONE,
3169 .hsw.regs = &hsw_power_well_regs,
3170 .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
3174 .name = "DDI B IO power well",
3175 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3176 .ops = &hsw_power_well_ops,
3177 .id = DISP_PW_ID_NONE,
3179 .hsw.regs = &hsw_power_well_regs,
3180 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3184 .name = "DDI C IO power well",
3185 .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3186 .ops = &hsw_power_well_ops,
3187 .id = DISP_PW_ID_NONE,
3189 .hsw.regs = &hsw_power_well_regs,
3190 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3194 .name = "DDI D IO power well",
3195 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
3196 .ops = &hsw_power_well_ops,
3197 .id = DISP_PW_ID_NONE,
3199 .hsw.regs = &hsw_power_well_regs,
3200 .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
/* Broxton well list: PW1 (DMC-managed), DC-off, PW2 and two DPIO PHY wells. */
3205 static const struct i915_power_well_desc bxt_power_wells[] = {
3207 .name = "always-on",
3209 .domains = POWER_DOMAIN_MASK,
3210 .ops = &i9xx_always_on_power_well_ops,
3211 .id = DISP_PW_ID_NONE,
3214 .name = "power well 1",
3215 /* Handled by the DMC firmware */
3218 .ops = &hsw_power_well_ops,
3219 .id = SKL_DISP_PW_1,
3221 .hsw.regs = &hsw_power_well_regs,
3222 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3223 .hsw.has_fuses = true,
3228 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
3229 .ops = &gen9_dc_off_power_well_ops,
3230 .id = SKL_DISP_DC_OFF,
3233 .name = "power well 2",
3234 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3235 .ops = &hsw_power_well_ops,
3236 .id = SKL_DISP_PW_2,
3238 .hsw.regs = &hsw_power_well_regs,
3239 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3240 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3241 .hsw.has_vga = true,
3242 .hsw.has_fuses = true,
3246 .name = "dpio-common-a",
3247 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
3248 .ops = &bxt_dpio_cmn_power_well_ops,
3249 .id = BXT_DISP_PW_DPIO_CMN_A,
3251 .bxt.phy = DPIO_PHY1,
3255 .name = "dpio-common-bc",
3256 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
3257 .ops = &bxt_dpio_cmn_power_well_ops,
3258 .id = VLV_DISP_PW_DPIO_CMN_BC,
3260 .bxt.phy = DPIO_PHY0,
/*
 * Display power wells for Geminilake.
 *
 * Same ordering contract as the other tables: enable low-to-high index,
 * disable in reverse.
 */
static const struct i915_power_well_desc glk_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "dpio-common-a",
		.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = BXT_DISP_PW_DPIO_CMN_A,
		{
			.bxt.phy = DPIO_PHY1,
		},
	},
	{
		.name = "dpio-common-b",
		.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.bxt.phy = DPIO_PHY0,
		},
	},
	{
		.name = "dpio-common-c",
		.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
		.ops = &bxt_dpio_cmn_power_well_ops,
		.id = GLK_DISP_PW_DPIO_CMN_C,
		{
			.bxt.phy = DPIO_PHY2,
		},
	},
	{
		.name = "AUX A",
		.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
};
/*
 * Display power wells for Cannonlake.
 *
 * NOTE(review): the last two entries (DDI F IO, AUX F) are dropped at
 * runtime for SKUs without port F by trimming power_well_count in
 * intel_power_domains_init() — keep them last in this array.
 */
static const struct i915_power_well_desc cnl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "AUX A",
		.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "AUX D",
		.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_AUX_D,
		},
	},
	{
		.name = "DC off",
		.domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_2,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO power well",
		.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = GLK_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO power well",
		.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO power well",
		.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO power well",
		.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_DDI_D,
		},
	},
	{
		.name = "DDI F IO power well",
		.domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_DDI_F,
		},
	},
	{
		.name = "AUX F",
		.domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = CNL_PW_CTL_IDX_AUX_F,
		},
	},
};
/*
 * Ops for ICL combo-PHY AUX wells: enable/disable need combo-PHY
 * specific handling, sync/is_enabled reuse the generic HSW helpers.
 */
static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_combo_phy_aux_power_well_enable,
	.disable = icl_combo_phy_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
/*
 * Ops for ICL Type-C PHY AUX wells (both TC and TBT modes); the TC
 * enable/disable hooks handle the Type-C specific sequencing.
 */
static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = icl_tc_phy_aux_power_well_enable,
	.disable = icl_tc_phy_aux_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
/* Register set used by the ICL AUX power-well control flow. */
static const struct i915_power_well_regs icl_aux_power_well_regs = {
	.bios	= ICL_PWR_WELL_CTL_AUX1,
	.driver	= ICL_PWR_WELL_CTL_AUX2,
	.debug	= ICL_PWR_WELL_CTL_AUX4,
};
/* Register set used by the ICL DDI IO power-well control flow. */
static const struct i915_power_well_regs icl_ddi_power_well_regs = {
	.bios	= ICL_PWR_WELL_CTL_DDI1,
	.driver	= ICL_PWR_WELL_CTL_DDI2,
	.debug	= ICL_PWR_WELL_CTL_DDI4,
};
/*
 * Display power wells for Icelake. Type-C AUX wells come in two
 * flavours per port: native TC (is_tc_tbt = false) and Thunderbolt
 * (is_tc_tbt = true).
 */
static const struct i915_power_well_desc icl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = ICL_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well 3",
		.domains = ICL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_3,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO",
		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
		},
	},
	{
		.name = "DDI E IO",
		.domains = ICL_DDI_IO_E_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_E,
		},
	},
	{
		.name = "DDI F IO",
		.domains = ICL_DDI_IO_F_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_F,
		},
	},
	{
		.name = "AUX A",
		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
		.ops = &icl_combo_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
		.ops = &icl_combo_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C TC1",
		.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX D TC2",
		.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX E TC3",
		.domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX F TC4",
		.domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX C TBT1",
		.domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX D TBT2",
		.domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX E TBT3",
		.domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX F TBT4",
		.domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "power well 4",
		.domains = ICL_PW_4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
		},
	},
};
/*
 * Display power wells for Elkhart Lake. Shares most domain sets with
 * ICL but has no Type-C/TBT wells; all AUX wells use the plain HSW ops.
 */
static const struct i915_power_well_desc ehl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = ICL_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well 3",
		.domains = ICL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D IO",
		.domains = ICL_DDI_IO_D_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_D,
		},
	},
	{
		.name = "AUX A",
		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "AUX D",
		.domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
		},
	},
	{
		.name = "power well 4",
		.domains = ICL_PW_4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
		},
	},
};
/*
 * Display power wells for Tigerlake: six Type-C ports (TC1-TC6), each
 * with a DDI IO well, a native-TC AUX well and a Thunderbolt AUX well,
 * plus power wells 4 and 5 gating pipes C and D respectively.
 */
static const struct i915_power_well_desc tgl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = TGL_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well 3",
		.domains = TGL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_3,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		},
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		},
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
		},
	},
	{
		.name = "DDI D TC1 IO",
		.domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
		},
	},
	{
		.name = "DDI E TC2 IO",
		.domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
		},
	},
	{
		.name = "DDI F TC3 IO",
		.domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
		},
	},
	{
		.name = "DDI G TC4 IO",
		.domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
		},
	},
	{
		.name = "DDI H TC5 IO",
		.domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
		},
	},
	{
		.name = "DDI I TC6 IO",
		.domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
		},
	},
	{
		.name = "AUX A",
		.domains = TGL_AUX_A_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = TGL_AUX_B_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = TGL_AUX_C_IO_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
		},
	},
	{
		.name = "AUX D TC1",
		.domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX E TC2",
		.domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX F TC3",
		.domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX G TC4",
		.domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX H TC5",
		.domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX I TC6",
		.domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX D TBT1",
		.domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX E TBT2",
		.domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX F TBT3",
		.domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX G TBT4",
		.domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX H TBT5",
		.domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX I TBT6",
		.domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
		.ops = &icl_tc_phy_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "power well 4",
		.domains = TGL_PW_4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
		},
	},
	{
		.name = "power well 5",
		.domains = TGL_PW_5_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_PW_5,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_D),
		},
	},
};
/*
 * Sanitize the i915.disable_power_well module parameter: a value >= 0
 * is taken as an explicit boolean; negative means "auto".
 */
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	/* NOTE(review): auto default reconstructed — confirm against upstream */
	return 1;
}
/*
 * Compute the mask of DC states the driver is allowed to use, combining
 * the platform's capabilities with the i915.enable_dc modparam.
 *
 * NOTE(review): the max_dc assignments, declarations, case labels and
 * final return were reconstructed from surrounding code — verify.
 */
static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
			       int enable_dc)
{
	u32 mask;
	int requested_dc;
	int max_dc;

	if (INTEL_GEN(dev_priv) >= 12) {
		max_dc = 4;
		/*
		 * DC9 has a separate HW flow from the rest of the DC states,
		 * not depending on the DMC firmware. It's needed by system
		 * suspend/resume, so allow it unconditionally.
		 */
		mask = DC_STATE_EN_DC9;
	} else if (IS_GEN(dev_priv, 11)) {
		max_dc = 2;
		mask = DC_STATE_EN_DC9;
	} else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
		max_dc = 2;
		mask = 0;
	} else if (IS_GEN9_LP(dev_priv)) {
		max_dc = 1;
		mask = DC_STATE_EN_DC9;
	} else {
		max_dc = 0;
		mask = 0;
	}

	/* DC states require the power wells to stay enabled */
	if (!i915_modparams.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "Adjusting requested max DC state (%d->%d)\n",
			    enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		drm_err(&dev_priv->drm,
			"Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	switch (requested_dc) {
	case 4:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
		break;
	case 3:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
		break;
	case 2:
		mask |= DC_STATE_EN_UPTO_DC6;
		break;
	case 1:
		mask |= DC_STATE_EN_UPTO_DC5;
		break;
	}

	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);

	return mask;
}
/*
 * Allocate and populate power_domains->power_wells from a descriptor
 * table, sanity-checking that each non-NONE well id fits in the 64-bit
 * id space and is unique. Returns 0 or -ENOMEM.
 */
static int
__set_power_wells(struct i915_power_domains *power_domains,
		  const struct i915_power_well_desc *power_well_descs,
		  int power_well_count)
{
	u64 power_well_ids = 0;
	int i;

	power_domains->power_well_count = power_well_count;
	power_domains->power_wells =
				kcalloc(power_well_count,
					sizeof(*power_domains->power_wells),
					GFP_KERNEL);
	if (!power_domains->power_wells)
		return -ENOMEM;

	for (i = 0; i < power_well_count; i++) {
		enum i915_power_well_id id = power_well_descs[i].id;

		power_domains->power_wells[i].desc = &power_well_descs[i];

		/* DISP_PW_ID_NONE wells are not tracked in the id bitmap */
		if (id == DISP_PW_ID_NONE)
			continue;

		WARN_ON(id >= sizeof(power_well_ids) * 8);
		WARN_ON(power_well_ids & BIT_ULL(id));
		power_well_ids |= BIT_ULL(id);
	}

	return 0;
}
/* Convenience wrapper: derive the descriptor count from the array itself. */
#define set_power_wells(power_domains, __power_well_descs) \
	__set_power_wells(power_domains, __power_well_descs, \
			  ARRAY_SIZE(__power_well_descs))
/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform. Also sanitizes the disable_power_well/enable_dc
 * module parameters and derives the allowed DC state mask.
 *
 * Returns 0 on success or a negative errno from set_power_wells().
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int err;

	i915_modparams.disable_power_well =
		sanitize_disable_power_well_option(dev_priv,
						   i915_modparams.disable_power_well);
	dev_priv->csr.allowed_dc_mask =
		get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);

	dev_priv->csr.target_dc_state =
		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);

	/* Power domain masks are stored in a u64 bitmap */
	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);

	mutex_init(&power_domains->lock);

	INIT_DELAYED_WORK(&power_domains->async_put_work,
			  intel_display_power_put_async_work);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_GEN(dev_priv, 12)) {
		err = set_power_wells(power_domains, tgl_power_wells);
	} else if (IS_ELKHARTLAKE(dev_priv)) {
		err = set_power_wells(power_domains, ehl_power_wells);
	} else if (IS_GEN(dev_priv, 11)) {
		err = set_power_wells(power_domains, icl_power_wells);
	} else if (IS_CANNONLAKE(dev_priv)) {
		err = set_power_wells(power_domains, cnl_power_wells);

		/*
		 * DDI and Aux IO are getting enabled for all ports
		 * regardless the presence or use. So, in order to avoid
		 * timeouts, lets remove them from the list
		 * for the SKUs without port F.
		 */
		if (!IS_CNL_WITH_PORT_F(dev_priv))
			power_domains->power_well_count -= 2;
	} else if (IS_GEMINILAKE(dev_priv)) {
		err = set_power_wells(power_domains, glk_power_wells);
	} else if (IS_BROXTON(dev_priv)) {
		err = set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_GEN9_BC(dev_priv)) {
		err = set_power_wells(power_domains, skl_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		err = set_power_wells(power_domains, chv_power_wells);
	} else if (IS_BROADWELL(dev_priv)) {
		err = set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_HASWELL(dev_priv)) {
		err = set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		err = set_power_wells(power_domains, vlv_power_wells);
	} else if (IS_I830(dev_priv)) {
		err = set_power_wells(power_domains, i830_power_wells);
	} else {
		err = set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return err;
}
/**
 * intel_power_domains_cleanup - clean up power domains resources
 * @dev_priv: i915 device instance
 *
 * Release any resources acquired by intel_power_domains_init()
 */
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
	/* Frees the array allocated by __set_power_wells() */
	kfree(dev_priv->power_domains.power_wells);
}
/*
 * Synchronize the software power-well state with the hardware: run each
 * well's sync_hw hook and cache its current enabled status, under the
 * power-domains lock.
 */
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);
	for_each_power_well(dev_priv, power_well) {
		power_well->desc->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled =
			power_well->desc->ops->is_enabled(dev_priv, power_well);
	}
	mutex_unlock(&power_domains->lock);
}
/*
 * Request a DBUF slice power state via @reg and verify the hardware
 * reached it. Returns true on success, false on timeout.
 *
 * NOTE(review): the local declarations, settle delay and return
 * statements were reconstructed — confirm against upstream.
 */
bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
			  i915_reg_t reg, bool enable)
{
	u32 val, status;

	val = intel_de_read(dev_priv, reg);
	val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
	intel_de_write(dev_priv, reg, val);
	intel_de_posting_read(dev_priv, reg);
	udelay(10);

	status = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
	if ((enable && !status) || (!enable && status)) {
		drm_err(&dev_priv->drm, "DBus power %s timeout!\n",
			enable ? "enable" : "disable");
		return false;
	}

	return true;
}
/* gen9: power up the single DBUF slice (S1). */
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1));
}
/* gen9: power down all DBUF slices. */
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	icl_dbuf_slices_update(dev_priv, 0);
}
/*
 * Program the set of enabled DBUF slices to @req_slices (a bitmask) and
 * record it in dev_priv->enabled_dbuf_slices_mask.
 *
 * NOTE(review): the @req_slices parameter line, loop index declaration
 * and the per-slice control register argument were reconstructed.
 */
void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
			    u8 req_slices)
{
	int i;
	int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices,
		 "Invalid number of dbuf slices requested\n");

	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
		    req_slices);

	/*
	 * Might be running this in parallel to gen9_dc_off_power_well_enable
	 * being called from intel_dp_detect for instance,
	 * which causes assertion triggered by race condition,
	 * as gen9_assert_dbuf_enabled might preempt this when registers
	 * were already updated, while dev_priv was not.
	 */
	mutex_lock(&power_domains->lock);

	for (i = 0; i < max_slices; i++) {
		intel_dbuf_slice_set(dev_priv,
				     DBUF_CTL_S(i),
				     (req_slices & BIT(i)) != 0);
	}

	dev_priv->enabled_dbuf_slices_mask = req_slices;

	mutex_unlock(&power_domains->lock);
}
/*
 * ICL+: read the current DDB state from hardware, then make sure at
 * least slice S1 is powered.
 */
static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
{
	skl_ddb_get_hw_state(dev_priv);
	/*
	 * Just power up at least 1 slice, we will
	 * figure out later which slices we have and what we need.
	 */
	icl_dbuf_slices_update(dev_priv, dev_priv->enabled_dbuf_slices_mask |
			       BIT(DBUF_S1));
}
/* ICL+: power down all DBUF slices. */
static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
{
	icl_dbuf_slices_update(dev_priv, 0);
}
/*
 * Initialize the MBUS ABOX credit configuration. Gen12 has additional
 * ABOX instances (ABOX1/ABOX2) that get the same programming.
 */
static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
	u32 mask, val;

	mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
		MBUS_ABOX_BT_CREDIT_POOL2_MASK |
		MBUS_ABOX_B_CREDIT_MASK |
		MBUS_ABOX_BW_CREDIT_MASK;
	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
		MBUS_ABOX_BT_CREDIT_POOL2(16) |
		MBUS_ABOX_B_CREDIT(1) |
		MBUS_ABOX_BW_CREDIT(1);

	intel_de_rmw(dev_priv, MBUS_ABOX_CTL, mask, val);
	if (INTEL_GEN(dev_priv) >= 12) {
		intel_de_rmw(dev_priv, MBUS_ABOX1_CTL, mask, val);
		intel_de_rmw(dev_priv, MBUS_ABOX2_CTL, mask, val);
	}
}
/* Verify that the BIOS left the LCPLL in a sane state for CDCLK. */
static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
{
	u32 val = intel_de_read(dev_priv, LCPLL_CTL);

	/*
	 * The LCPLL register should be turned on by the BIOS. For now
	 * let's just check its state and print errors in case
	 * something is wrong. Don't even try to turn it on.
	 */

	if (val & LCPLL_CD_SOURCE_FCLK)
		drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");

	if (val & LCPLL_PLL_DISABLE)
		drm_err(&dev_priv->drm, "LCPLL is disabled\n");

	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
		drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
}
/*
 * Sanity-check that nothing which depends on the LCPLL is still active
 * before we disable it: CRTCs, power wells, PLLs, panel power, PWMs,
 * the utility pin, GTC and interrupts.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	/* NOTE(review): the SPLL warning string was reconstructed */
	I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
			"SPLL enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
			"WRPLL1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
			"WRPLL2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
			"Panel power on\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
			"PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
/* Read D_COMP from the per-platform register (HSW vs BDW location). */
4639 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
4641 if (IS_HASWELL(dev_priv))
4642 return intel_de_read(dev_priv, D_COMP_HSW);
4644 return intel_de_read(dev_priv, D_COMP_BDW);
/*
 * Write D_COMP: Haswell must go through the pcode mailbox, Broadwell
 * writes the MMIO register directly (with a posting read).
 */
4647 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
4649 if (IS_HASWELL(dev_priv)) {
4650 if (sandybridge_pcode_write(dev_priv,
4651 GEN6_PCODE_WRITE_D_COMP, val))
4652 drm_dbg_kms(&dev_priv->drm,
4653 "Failed to write to D_COMP\n");
4655 intel_de_write(dev_priv, D_COMP_BDW, val);
4656 intel_de_posting_read(dev_priv, D_COMP_BDW);
4661 * This function implements pieces of two sequences from BSpec:
4662 * - Sequence for display software to disable LCPLL
4663 * - Sequence for display software to allow package C8+
4664 * The steps implemented here are just the steps that actually touch the LCPLL
4665 * register. Callers should take care of disabling all the display engine
4666 * functions, doing the mode unset, fixing interrupts, etc.
4668 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
4669 bool switch_to_fclk, bool allow_power_down)
4673 assert_can_disable_lcpll(dev_priv);
4675 val = intel_de_read(dev_priv, LCPLL_CTL);
/* Optionally move CDCLK onto FCLK before stopping the PLL. */
4677 if (switch_to_fclk) {
4678 val |= LCPLL_CD_SOURCE_FCLK;
4679 intel_de_write(dev_priv, LCPLL_CTL, val);
4681 if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
4682 LCPLL_CD_SOURCE_FCLK_DONE, 1))
4683 drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
4685 val = intel_de_read(dev_priv, LCPLL_CTL);
4688 val |= LCPLL_PLL_DISABLE;
4689 intel_de_write(dev_priv, LCPLL_CTL, val);
4690 intel_de_posting_read(dev_priv, LCPLL_CTL);
4692 if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
4693 drm_err(&dev_priv->drm, "LCPLL still locked\n");
/* Disable the compensation block and wait for RCOMP to idle. */
4695 val = hsw_read_dcomp(dev_priv);
4696 val |= D_COMP_COMP_DISABLE;
4697 hsw_write_dcomp(dev_priv, val);
4700 if (wait_for((hsw_read_dcomp(dev_priv) &
4701 D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
4702 drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
4704 if (allow_power_down) {
4705 val = intel_de_read(dev_priv, LCPLL_CTL);
4706 val |= LCPLL_POWER_DOWN_ALLOW;
4707 intel_de_write(dev_priv, LCPLL_CTL, val);
4708 intel_de_posting_read(dev_priv, LCPLL_CTL);
4713 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
4716 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
4720 val = intel_de_read(dev_priv, LCPLL_CTL);
/* Already fully up (locked, enabled, on LCPLL, power-down disallowed)? */
4722 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
4723 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
4727 * Make sure we're not on PC8 state before disabling PC8, otherwise
4728 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
4730 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
4732 if (val & LCPLL_POWER_DOWN_ALLOW) {
4733 val &= ~LCPLL_POWER_DOWN_ALLOW;
4734 intel_de_write(dev_priv, LCPLL_CTL, val);
4735 intel_de_posting_read(dev_priv, LCPLL_CTL);
/* Re-enable compensation (force on, clear disable). */
4738 val = hsw_read_dcomp(dev_priv);
4739 val |= D_COMP_COMP_FORCE;
4740 val &= ~D_COMP_COMP_DISABLE;
4741 hsw_write_dcomp(dev_priv, val);
4743 val = intel_de_read(dev_priv, LCPLL_CTL);
4744 val &= ~LCPLL_PLL_DISABLE;
4745 intel_de_write(dev_priv, LCPLL_CTL, val);
4747 if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
4748 drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
/* Move CDCLK back from FCLK onto the (now locked) LCPLL. */
4750 if (val & LCPLL_CD_SOURCE_FCLK) {
4751 val = intel_de_read(dev_priv, LCPLL_CTL);
4752 val &= ~LCPLL_CD_SOURCE_FCLK;
4753 intel_de_write(dev_priv, LCPLL_CTL, val);
4755 if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
4756 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
4757 drm_err(&dev_priv->drm,
4758 "Switching back to LCPLL failed\n");
4761 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
/* Refresh the cached CDCLK config after the clock source change. */
4763 intel_update_cdclk(dev_priv);
4764 intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
4768 * Package states C8 and deeper are really deep PC states that can only be
4769 * reached when all the devices on the system allow it, so even if the graphics
4770 * device allows PC8+, it doesn't mean the system will actually get to these
4771 * states. Our driver only allows PC8+ when going into runtime PM.
4773 * The requirements for PC8+ are that all the outputs are disabled, the power
4774 * well is disabled and most interrupts are disabled, and these are also
4775 * requirements for runtime PM. When these conditions are met, we manually do
4776 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
4777 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
4780 * When we really reach PC8 or deeper states (not just when we allow it) we lose
4781 * the state of some registers, so when we come back from PC8+ we need to
4782 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
4783 * need to take care of the registers kept by RC6. Notice that this happens even
4784 * if we don't put the device in PCI D3 state (which is what currently happens
4785 * because of the runtime PM support).
4787 * For more, read "Display Sequences for Package C8" on the hardware
4790 static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
4794 drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n")_;
4796 if (HAS_PCH_LPT_LP(dev_priv)) {
4797 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
4798 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4799 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
4802 lpt_disable_clkout_dp(dev_priv);
4803 hsw_disable_lcpll(dev_priv, true, true);
/* Undo hsw_enable_pc8(): restore LCPLL, PCH refclk and clock gating. */
4806 static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
4810 drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
4812 hsw_restore_lcpll(dev_priv);
4813 intel_init_pch_refclk(dev_priv);
4815 if (HAS_PCH_LPT_LP(dev_priv)) {
4816 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
4817 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
4818 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
/*
 * Enable/disable the PCH reset handshake bits. IVB uses a different
 * register/bit layout than HSW+.
 */
4822 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
4826 u32 reset_bits, val;
4828 if (IS_IVYBRIDGE(dev_priv)) {
4830 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
4832 reg = HSW_NDE_RSTWRN_OPT;
4833 reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
4836 val = intel_de_read(dev_priv, reg);
4843 intel_de_write(dev_priv, reg, val);
/*
 * SKL display core init sequence: disable DC states, enable the PCH
 * handshake, power wells PG1 + Misc I/O, CDCLK and DBUF, then reload the
 * DMC firmware on resume.
 */
4846 static void skl_display_core_init(struct drm_i915_private *dev_priv,
4849 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4850 struct i915_power_well *well;
4852 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4854 /* enable PCH reset handshake */
4855 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4857 /* enable PG1 and Misc I/O */
4858 mutex_lock(&power_domains->lock);
4860 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4861 intel_power_well_enable(dev_priv, well);
4863 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
4864 intel_power_well_enable(dev_priv, well);
4866 mutex_unlock(&power_domains->lock);
4868 intel_cdclk_init_hw(dev_priv);
4870 gen9_dbuf_enable(dev_priv);
4872 if (resume && dev_priv->csr.dmc_payload)
4873 intel_csr_load_program(dev_priv);
/* Reverse of skl_display_core_init(); MISC IO is deliberately left on. */
4876 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
4878 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4879 struct i915_power_well *well;
4881 gen9_disable_dc_states(dev_priv);
4883 gen9_dbuf_disable(dev_priv);
4885 intel_cdclk_uninit_hw(dev_priv);
4887 /* The spec doesn't call for removing the reset handshake flag */
4888 /* disable PG1 and Misc I/O */
4890 mutex_lock(&power_domains->lock);
4893 * BSpec says to keep the MISC IO power well enabled here, only
4894 * remove our request for power well 1.
4895 * Note that even though the driver's request is removed power well 1
4896 * may stay enabled after this due to DMC's own request on it.
4898 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4899 intel_power_well_disable(dev_priv, well);
4901 mutex_unlock(&power_domains->lock);
4903 usleep_range(10, 30); /* 10 us delay per Bspec */
/*
 * BXT display core init: like SKL but the PCH handshake must stay
 * disabled (no PCH present) and there is no Misc I/O well.
 */
4906 static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
4908 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4909 struct i915_power_well *well;
4911 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4914 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
4915 * or else the reset will hang because there is no PCH to respond.
4916 * Move the handshake programming to initialization sequence.
4917 * Previously was left up to BIOS.
4919 intel_pch_reset_handshake(dev_priv, false);
4922 mutex_lock(&power_domains->lock);
4924 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4925 intel_power_well_enable(dev_priv, well);
4927 mutex_unlock(&power_domains->lock);
4929 intel_cdclk_init_hw(dev_priv);
4931 gen9_dbuf_enable(dev_priv);
4933 if (resume && dev_priv->csr.dmc_payload)
4934 intel_csr_load_program(dev_priv);
/* Reverse of bxt_display_core_init(). */
4937 static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
4939 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4940 struct i915_power_well *well;
4942 gen9_disable_dc_states(dev_priv);
4944 gen9_dbuf_disable(dev_priv);
4946 intel_cdclk_uninit_hw(dev_priv);
4948 /* The spec doesn't call for removing the reset handshake flag */
4951 * Disable PW1 (PG1).
4952 * Note that even though the driver's request is removed power well 1
4953 * may stay enabled after this due to DMC's own request on it.
4955 mutex_lock(&power_domains->lock);
4957 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4958 intel_power_well_disable(dev_priv, well);
4960 mutex_unlock(&power_domains->lock);
4962 usleep_range(10, 30); /* 10 us delay per Bspec */
/*
 * CNL display core init sequence (numbered steps follow BSpec):
 * handshake, combo PHYs, PG1, CDCLK, DBUF, then DMC reload on resume.
 */
4965 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
4967 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4968 struct i915_power_well *well;
4970 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4972 /* 1. Enable PCH Reset Handshake */
4973 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4976 intel_combo_phy_init(dev_priv);
4979 * 4. Enable Power Well 1 (PG1).
4980 * The AUX IO power wells will be enabled on demand.
4982 mutex_lock(&power_domains->lock);
4983 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4984 intel_power_well_enable(dev_priv, well);
4985 mutex_unlock(&power_domains->lock);
4987 /* 5. Enable CD clock */
4988 intel_cdclk_init_hw(dev_priv);
4990 /* 6. Enable DBUF */
4991 gen9_dbuf_enable(dev_priv);
4993 if (resume && dev_priv->csr.dmc_payload)
4994 intel_csr_load_program(dev_priv);
/* Reverse of cnl_display_core_init(). */
4997 static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
4999 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5000 struct i915_power_well *well;
5002 gen9_disable_dc_states(dev_priv);
5004 /* 1. Disable all display engine functions -> already done */
5006 /* 2. Disable DBUF */
5007 gen9_dbuf_disable(dev_priv);
5009 /* 3. Disable CD clock */
5010 intel_cdclk_uninit_hw(dev_priv);
5013 * 4. Disable Power Well 1 (PG1).
5014 * The AUX IO power wells are toggled on demand, so they are already
5015 * disabled at this point.
5017 mutex_lock(&power_domains->lock);
5018 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5019 intel_power_well_disable(dev_priv, well);
5020 mutex_unlock(&power_domains->lock);
5022 usleep_range(10, 30); /* 10 us delay per Bspec */
5025 intel_combo_phy_uninit(dev_priv);
/* BW_BUDDY page-mask lookup entry keyed by channel count and DRAM type. */
5028 struct buddy_page_mask {
/* Default TGL page masks; terminated by a zero page_mask entry. */
5034 static const struct buddy_page_mask tgl_buddy_page_masks[] = {
5035 { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0xE },
5036 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
5037 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
5038 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
/* Wa_1409767108 (TGL A0/B0) masks; same zero-terminated layout. */
5042 static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
5043 { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
5044 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 },
5045 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
5046 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 },
/*
 * Program the BW_BUDDY page masks for the detected DRAM configuration,
 * or disable buddy logic entirely when the config is unknown.
 */
5050 static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
5052 enum intel_dram_type type = dev_priv->dram_info.type;
5053 u8 num_channels = dev_priv->dram_info.num_channels;
5054 const struct buddy_page_mask *table;
5057 if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
5058 /* Wa_1409767108: tgl */
5059 table = wa_1409767108_buddy_page_masks;
5061 table = tgl_buddy_page_masks;
/* Linear search; tables are zero-page_mask terminated. */
5063 for (i = 0; table[i].page_mask != 0; i++)
5064 if (table[i].num_channels == num_channels &&
5065 table[i].type == type)
5068 if (table[i].page_mask == 0) {
5069 drm_dbg(&dev_priv->drm,
5070 "Unknown memory configuration; disabling address buddy logic.\n");
5071 intel_de_write(dev_priv, BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
5072 intel_de_write(dev_priv, BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
5074 intel_de_write(dev_priv, BW_BUDDY1_PAGE_MASK,
5075 table[i].page_mask);
5076 intel_de_write(dev_priv, BW_BUDDY2_PAGE_MASK,
5077 table[i].page_mask);
5079 /* Wa_22010178259:tgl */
5080 intel_de_rmw(dev_priv, BW_BUDDY1_CTL,
5081 BW_BUDDY_TLB_REQ_TIMER_MASK,
5082 REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
5083 intel_de_rmw(dev_priv, BW_BUDDY2_CTL,
5084 BW_BUDDY_TLB_REQ_TIMER_MASK,
5085 REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
/*
 * ICL+ display core init sequence (numbered steps follow BSpec). Also
 * handles the gen12 BW_BUDDY setup and DMC reload on resume.
 */
5089 static void icl_display_core_init(struct drm_i915_private *dev_priv,
5092 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5093 struct i915_power_well *well;
5095 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5097 /* 1. Enable PCH reset handshake. */
5098 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5100 /* 2. Initialize all combo phys */
5101 intel_combo_phy_init(dev_priv);
5104 * 3. Enable Power Well 1 (PG1).
5105 * The AUX IO power wells will be enabled on demand.
5107 mutex_lock(&power_domains->lock);
5108 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5109 intel_power_well_enable(dev_priv, well);
5110 mutex_unlock(&power_domains->lock);
5112 /* 4. Enable CDCLK. */
5113 intel_cdclk_init_hw(dev_priv);
5115 /* 5. Enable DBUF. */
5116 icl_dbuf_enable(dev_priv);
5118 /* 6. Setup MBUS. */
5119 icl_mbus_init(dev_priv);
5121 /* 7. Program arbiter BW_BUDDY registers */
5122 if (INTEL_GEN(dev_priv) >= 12)
5123 tgl_bw_buddy_init(dev_priv);
5125 if (resume && dev_priv->csr.dmc_payload)
5126 intel_csr_load_program(dev_priv);
/* Reverse of icl_display_core_init(). */
5129 static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
5131 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5132 struct i915_power_well *well;
5134 gen9_disable_dc_states(dev_priv);
5136 /* 1. Disable all display engine functions -> already done */
5138 /* 2. Disable DBUF */
5139 icl_dbuf_disable(dev_priv);
5141 /* 3. Disable CD clock */
5142 intel_cdclk_uninit_hw(dev_priv);
5145 * 4. Disable Power Well 1 (PG1).
5146 * The AUX IO power wells are toggled on demand, so they are already
5147 * disabled at this point.
5149 mutex_lock(&power_domains->lock);
5150 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5151 intel_power_well_disable(dev_priv, well);
5152 mutex_unlock(&power_domains->lock);
5155 intel_combo_phy_uninit(dev_priv);
/*
 * Reconstruct the shadow copy of DISPLAY_PHY_CONTROL from the current
 * power-well and lane status, since the register itself must never be
 * read (hardware corruption workaround).
 */
5158 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
5160 struct i915_power_well *cmn_bc =
5161 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5162 struct i915_power_well *cmn_d =
5163 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
5166 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
5167 * workaround never ever read DISPLAY_PHY_CONTROL, and
5168 * instead maintain a shadow copy ourselves. Use the actual
5169 * power well state and lane status to reconstruct the
5170 * expected initial value.
5172 dev_priv->chv_phy_control =
5173 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
5174 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
5175 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
5176 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
5177 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
5180 * If all lanes are disabled we leave the override disabled
5181 * with all power down bits cleared to match the state we
5182 * would use after disabling the port. Otherwise enable the
5183 * override and set the lane powerdown bits according to the
5184 * current lane status.
5186 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
5187 u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
/* PHY0 channel 0 (port B) lane status. */
5190 mask = status & DPLL_PORTB_READY_MASK;
5194 dev_priv->chv_phy_control |=
5195 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
5197 dev_priv->chv_phy_control |=
5198 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
/* PHY0 channel 1 (port C) lane status. */
5200 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
5204 dev_priv->chv_phy_control |=
5205 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
5207 dev_priv->chv_phy_control |=
5208 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
5210 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
5212 dev_priv->chv_phy_assert[DPIO_PHY0] = false;
5214 dev_priv->chv_phy_assert[DPIO_PHY0] = true;
5217 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
5218 u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
/* PHY1 channel 0 (port D) lane status. */
5221 mask = status & DPLL_PORTD_READY_MASK;
5226 dev_priv->chv_phy_control |=
5227 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
5229 dev_priv->chv_phy_control |=
5230 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
5232 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
5234 dev_priv->chv_phy_assert[DPIO_PHY1] = false;
5236 dev_priv->chv_phy_assert[DPIO_PHY1] = true;
5239 drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
5240 dev_priv->chv_phy_control);
5242 /* Defer application of initial phy_control to enabling the powerwell */
/*
 * VLV common-lane workaround: toggle the display PHY sideband reset by
 * power-cycling the common lane well, unless the display is already
 * active with the reset properly deasserted.
 */
5245 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
5247 struct i915_power_well *cmn =
5248 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5249 struct i915_power_well *disp2d =
5250 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
5252 /* If the display might be already active skip this */
5253 if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
5254 disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
5255 intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
5258 drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
5260 /* cmnlane needs DPLL registers */
5261 disp2d->desc->ops->enable(dev_priv, disp2d);
5264 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
5265 * Need to assert and de-assert PHY SB reset by gating the
5266 * common lane power, then un-gating it.
5267 * Simply ungating isn't enough to reset the PHY enough to get
5268 * ports and lanes running.
5270 cmn->desc->ops->disable(dev_priv, cmn);
/* Query a Punit SSPM0 register and report whether the IP is power gated. */
5273 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
5277 vlv_punit_get(dev_priv);
5278 ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
5279 vlv_punit_put(dev_priv);
/* Warn if the VED (video encode/decode) block is not power gated. */
5284 static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
5286 drm_WARN(&dev_priv->drm,
5287 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
5288 "VED not power gated\n");
/*
 * Warn if the ISP is not power gated — unless an ISP PCI device (with
 * one of the known device IDs) is present and owns that power state.
 */
5291 static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
5293 static const struct pci_device_id isp_ids[] = {
5294 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
5295 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
5299 drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
5300 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
5301 "ISP not power gated\n");
5304 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
5307 * intel_power_domains_init_hw - initialize hardware power domain state
5308 * @i915: i915 device instance
5309 * @resume: Called from resume code paths or not
5311 * This function initializes the hardware power domain state and enables all
5312 * power wells belonging to the INIT power domain. Power wells in other
5313 * domains (and not in the INIT domain) are referenced or disabled by
5314 * intel_modeset_readout_hw_state(). After that the reference count of each
5315 * power well must match its HW enabled state, see
5316 * intel_power_domains_verify_state().
5318 * It will return with power domains disabled (to be enabled later by
5319 * intel_power_domains_enable()) and must be paired with
5320 * intel_power_domains_driver_remove().
5322 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
5324 struct i915_power_domains *power_domains = &i915->power_domains;
5326 power_domains->initializing = true;
/* Per-platform display core init, newest platforms first. */
5328 if (INTEL_GEN(i915) >= 11) {
5329 icl_display_core_init(i915, resume);
5330 } else if (IS_CANNONLAKE(i915)) {
5331 cnl_display_core_init(i915, resume);
5332 } else if (IS_GEN9_BC(i915)) {
5333 skl_display_core_init(i915, resume);
5334 } else if (IS_GEN9_LP(i915)) {
5335 bxt_display_core_init(i915, resume);
5336 } else if (IS_CHERRYVIEW(i915)) {
5337 mutex_lock(&power_domains->lock);
5338 chv_phy_control_init(i915);
5339 mutex_unlock(&power_domains->lock);
5340 assert_isp_power_gated(i915);
5341 } else if (IS_VALLEYVIEW(i915)) {
5342 mutex_lock(&power_domains->lock);
5343 vlv_cmnlane_wa(i915);
5344 mutex_unlock(&power_domains->lock);
5345 assert_ved_power_gated(i915);
5346 assert_isp_power_gated(i915);
5347 } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
5348 hsw_assert_cdclk(i915);
5349 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5350 } else if (IS_IVYBRIDGE(i915)) {
5351 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5355 * Keep all power wells enabled for any dependent HW access during
5356 * initialization and to make sure we keep BIOS enabled display HW
5357 * resources powered until display HW readout is complete. We drop
5358 * this reference in intel_power_domains_enable().
5360 power_domains->wakeref =
5361 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5363 /* Disable power support if the user asked so. */
5364 if (!i915_modparams.disable_power_well)
5365 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5366 intel_power_domains_sync_hw(i915);
5368 power_domains->initializing = false;
5372 * intel_power_domains_driver_remove - deinitialize hw power domain state
5373 * @i915: i915 device instance
5375 * De-initializes the display power domain HW state. It also ensures that the
5376 * device stays powered up so that the driver can be reloaded.
5378 * It must be called with power domains already disabled (after a call to
5379 * intel_power_domains_disable()) and must be paired with
5380 * intel_power_domains_init_hw().
5382 void intel_power_domains_driver_remove(struct drm_i915_private *i915)
5384 intel_wakeref_t wakeref __maybe_unused =
5385 fetch_and_zero(&i915->power_domains.wakeref);
5387 /* Remove the refcount we took to keep power well support disabled. */
5388 if (!i915_modparams.disable_power_well)
5389 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
/* Flush pending async power-domain work before verifying. */
5391 intel_display_power_flush_work_sync(i915);
5393 intel_power_domains_verify_state(i915);
5395 /* Keep the power well enabled, but cancel its rpm wakeref. */
5396 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
5400 * intel_power_domains_enable - enable toggling of display power wells
5401 * @i915: i915 device instance
5403 * Enable the ondemand enabling/disabling of the display power wells. Note that
5404 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
5405 * only at specific points of the display modeset sequence, thus they are not
5406 * affected by the intel_power_domains_enable()/disable() calls. The purpose
5407 * of these function is to keep the rest of power wells enabled until the end
5408 * of display HW readout (which will acquire the power references reflecting
5409 * the current HW state).
5411 void intel_power_domains_enable(struct drm_i915_private *i915)
5413 intel_wakeref_t wakeref __maybe_unused =
5414 fetch_and_zero(&i915->power_domains.wakeref);
5416 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5417 intel_power_domains_verify_state(i915);
5421 * intel_power_domains_disable - disable toggling of display power wells
5422 * @i915: i915 device instance
5424 * Disable the ondemand enabling/disabling of the display power wells. See
5425 * intel_power_domains_enable() for which power wells this call controls.
5427 void intel_power_domains_disable(struct drm_i915_private *i915)
5429 struct i915_power_domains *power_domains = &i915->power_domains;
/* A wakeref must not already be held from a prior disable/init. */
5431 drm_WARN_ON(&i915->drm, power_domains->wakeref);
5432 power_domains->wakeref =
5433 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5435 intel_power_domains_verify_state(i915);
5439 * intel_power_domains_suspend - suspend power domain state
5440 * @i915: i915 device instance
5441 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
5443 * This function prepares the hardware power domain state before entering
5446 * It must be called with power domains already disabled (after a call to
5447 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
5449 void intel_power_domains_suspend(struct drm_i915_private *i915,
5450 enum i915_drm_suspend_mode suspend_mode)
5452 struct i915_power_domains *power_domains = &i915->power_domains;
5453 intel_wakeref_t wakeref __maybe_unused =
5454 fetch_and_zero(&power_domains->wakeref);
5456 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5459 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
5460 * support don't manually deinit the power domains. This also means the
5461 * CSR/DMC firmware will stay active, it will power down any HW
5462 * resources as required and also enable deeper system power states
5463 * that would be blocked if the firmware was inactive.
5465 if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
5466 suspend_mode == I915_DRM_SUSPEND_IDLE &&
5467 i915->csr.dmc_payload) {
5468 intel_display_power_flush_work(i915);
5469 intel_power_domains_verify_state(i915);
5474 * Even if power well support was disabled we still want to disable
5475 * power wells if power domains must be deinitialized for suspend.
5477 if (!i915_modparams.disable_power_well)
5478 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
5480 intel_display_power_flush_work(i915);
5481 intel_power_domains_verify_state(i915);
/* Per-platform display core teardown, newest platforms first. */
5483 if (INTEL_GEN(i915) >= 11)
5484 icl_display_core_uninit(i915);
5485 else if (IS_CANNONLAKE(i915))
5486 cnl_display_core_uninit(i915);
5487 else if (IS_GEN9_BC(i915))
5488 skl_display_core_uninit(i915);
5489 else if (IS_GEN9_LP(i915))
5490 bxt_display_core_uninit(i915);
5492 power_domains->display_core_suspended = true;
5496 * intel_power_domains_resume - resume power domain state
5497 * @i915: i915 device instance
5499 * This function resumes the hardware power domain state during system resume.
5501 * It will return with power domain support disabled (to be enabled later by
5502 * intel_power_domains_enable()) and must be paired with
5503 * intel_power_domains_suspend().
5505 void intel_power_domains_resume(struct drm_i915_private *i915)
5507 struct i915_power_domains *power_domains = &i915->power_domains;
5509 if (power_domains->display_core_suspended) {
5510 intel_power_domains_init_hw(i915, true);
5511 power_domains->display_core_suspended = false;
5513 drm_WARN_ON(&i915->drm, power_domains->wakeref);
5514 power_domains->wakeref =
5515 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5518 intel_power_domains_verify_state(i915);
5521 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/* Debug dump: each power well's refcount and its domains' use counts. */
5523 static void intel_power_domains_dump_info(struct drm_i915_private *i915)
5525 struct i915_power_domains *power_domains = &i915->power_domains;
5526 struct i915_power_well *power_well;
5528 for_each_power_well(i915, power_well) {
5529 enum intel_display_power_domain domain;
5531 drm_dbg(&i915->drm, "%-25s %d\n",
5532 power_well->desc->name, power_well->count);
5534 for_each_power_domain(domain, power_well->desc->domains)
5535 drm_dbg(&i915->drm, " %-23s %d\n",
5536 intel_display_power_domain_str(domain),
5537 power_domains->domain_use_count[domain]);
5542 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
5543 * @i915: i915 device instance
5545 * Verify if the reference count of each power well matches its HW enabled
5546 * state and the total refcount of the domains it belongs to. This must be
5547 * called after modeset HW state sanitization, which is responsible for
5548 * acquiring reference counts for any power wells in use and disabling the
5549 * ones left on by BIOS but not required by any active output.
5551 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
5553 struct i915_power_domains *power_domains = &i915->power_domains;
5554 struct i915_power_well *power_well;
5555 bool dump_domain_info;
5557 mutex_lock(&power_domains->lock);
5559 verify_async_put_domains_state(power_domains);
5561 dump_domain_info = false;
5562 for_each_power_well(i915, power_well) {
5563 enum intel_display_power_domain domain;
/* 1) refcount (or always-on) must match the HW enabled state. */
5567 enabled = power_well->desc->ops->is_enabled(i915, power_well);
5568 if ((power_well->count || power_well->desc->always_on) !=
5571 "power well %s state mismatch (refcount %d/enabled %d)",
5572 power_well->desc->name,
5573 power_well->count, enabled);
/* 2) well refcount must equal the sum of its domains' use counts. */
5576 for_each_power_domain(domain, power_well->desc->domains)
5577 domains_count += power_domains->domain_use_count[domain];
5579 if (power_well->count != domains_count) {
5581 "power well %s refcount/domain refcount mismatch "
5582 "(refcount %d/domains refcount %d)\n",
5583 power_well->desc->name, power_well->count,
5585 dump_domain_info = true;
5589 if (dump_domain_info) {
5593 intel_power_domains_dump_info(i915);
5598 mutex_unlock(&power_domains->lock);
/* No-op stub when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is disabled. */
5603 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
/* Late suspend: enter DC9 (gen11+/GEN9 LP) or PC8 (HSW/BDW). */
5609 void intel_display_power_suspend_late(struct drm_i915_private *i915)
5611 if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915))
5612 bxt_enable_dc9(i915);
5613 else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
5614 hsw_enable_pc8(i915);
/* Early resume counterpart of intel_display_power_suspend_late(). */
5617 void intel_display_power_resume_early(struct drm_i915_private *i915)
5619 if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
/* Sanitize the DC state left by the DMC firmware before leaving DC9. */
5620 gen9_sanitize_dc_state(i915);
5621 bxt_disable_dc9(i915);
5622 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5623 hsw_disable_pc8(i915);
/* Suspend: tear down the display core, then allow DC9 / PC8. */
5627 void intel_display_power_suspend(struct drm_i915_private *i915)
5629 if (INTEL_GEN(i915) >= 11) {
5630 icl_display_core_uninit(i915);
5631 bxt_enable_dc9(i915);
5632 } else if (IS_GEN9_LP(i915)) {
5633 bxt_display_core_uninit(i915);
5634 bxt_enable_dc9(i915);
5635 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5636 hsw_enable_pc8(i915);
5640 void intel_display_power_resume(struct drm_i915_private *i915)
5642 if (INTEL_GEN(i915) >= 11) {
5643 bxt_disable_dc9(i915);
5644 icl_display_core_init(i915, true);
5645 if (i915->csr.dmc_payload) {
5646 if (i915->csr.allowed_dc_mask &
5647 DC_STATE_EN_UPTO_DC6)
5648 skl_enable_dc6(i915);
5649 else if (i915->csr.allowed_dc_mask &
5650 DC_STATE_EN_UPTO_DC5)
5651 gen9_enable_dc5(i915);
5653 } else if (IS_GEN9_LP(i915)) {
5654 bxt_disable_dc9(i915);
5655 bxt_display_core_init(i915, true);
5656 if (i915->csr.dmc_payload &&
5657 (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
5658 gen9_enable_dc5(i915);
5659 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5660 hsw_disable_pc8(i915);