1 /* SPDX-License-Identifier: MIT */
3 * Copyright © 2019 Intel Corporation
6 #include "display/intel_crt.h"
7 #include "display/intel_dp.h"
11 #include "intel_cdclk.h"
12 #include "intel_combo_phy.h"
13 #include "intel_csr.h"
14 #include "intel_display_power.h"
15 #include "intel_display_types.h"
16 #include "intel_dpio_phy.h"
17 #include "intel_hotplug.h"
19 #include "intel_sideband.h"
21 #include "intel_vga.h"
23 static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops;
25 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
26 enum i915_power_well_id power_well_id);
29 intel_display_power_domain_str(enum intel_display_power_domain domain)
32 case POWER_DOMAIN_DISPLAY_CORE:
33 return "DISPLAY_CORE";
34 case POWER_DOMAIN_PIPE_A:
36 case POWER_DOMAIN_PIPE_B:
38 case POWER_DOMAIN_PIPE_C:
40 case POWER_DOMAIN_PIPE_D:
42 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
43 return "PIPE_A_PANEL_FITTER";
44 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
45 return "PIPE_B_PANEL_FITTER";
46 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
47 return "PIPE_C_PANEL_FITTER";
48 case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
49 return "PIPE_D_PANEL_FITTER";
50 case POWER_DOMAIN_TRANSCODER_A:
51 return "TRANSCODER_A";
52 case POWER_DOMAIN_TRANSCODER_B:
53 return "TRANSCODER_B";
54 case POWER_DOMAIN_TRANSCODER_C:
55 return "TRANSCODER_C";
56 case POWER_DOMAIN_TRANSCODER_D:
57 return "TRANSCODER_D";
58 case POWER_DOMAIN_TRANSCODER_EDP:
59 return "TRANSCODER_EDP";
60 case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
61 return "TRANSCODER_VDSC_PW2";
62 case POWER_DOMAIN_TRANSCODER_DSI_A:
63 return "TRANSCODER_DSI_A";
64 case POWER_DOMAIN_TRANSCODER_DSI_C:
65 return "TRANSCODER_DSI_C";
66 case POWER_DOMAIN_PORT_DDI_A_LANES:
67 return "PORT_DDI_A_LANES";
68 case POWER_DOMAIN_PORT_DDI_B_LANES:
69 return "PORT_DDI_B_LANES";
70 case POWER_DOMAIN_PORT_DDI_C_LANES:
71 return "PORT_DDI_C_LANES";
72 case POWER_DOMAIN_PORT_DDI_D_LANES:
73 return "PORT_DDI_D_LANES";
74 case POWER_DOMAIN_PORT_DDI_E_LANES:
75 return "PORT_DDI_E_LANES";
76 case POWER_DOMAIN_PORT_DDI_F_LANES:
77 return "PORT_DDI_F_LANES";
78 case POWER_DOMAIN_PORT_DDI_G_LANES:
79 return "PORT_DDI_G_LANES";
80 case POWER_DOMAIN_PORT_DDI_H_LANES:
81 return "PORT_DDI_H_LANES";
82 case POWER_DOMAIN_PORT_DDI_I_LANES:
83 return "PORT_DDI_I_LANES";
84 case POWER_DOMAIN_PORT_DDI_A_IO:
85 return "PORT_DDI_A_IO";
86 case POWER_DOMAIN_PORT_DDI_B_IO:
87 return "PORT_DDI_B_IO";
88 case POWER_DOMAIN_PORT_DDI_C_IO:
89 return "PORT_DDI_C_IO";
90 case POWER_DOMAIN_PORT_DDI_D_IO:
91 return "PORT_DDI_D_IO";
92 case POWER_DOMAIN_PORT_DDI_E_IO:
93 return "PORT_DDI_E_IO";
94 case POWER_DOMAIN_PORT_DDI_F_IO:
95 return "PORT_DDI_F_IO";
96 case POWER_DOMAIN_PORT_DDI_G_IO:
97 return "PORT_DDI_G_IO";
98 case POWER_DOMAIN_PORT_DDI_H_IO:
99 return "PORT_DDI_H_IO";
100 case POWER_DOMAIN_PORT_DDI_I_IO:
101 return "PORT_DDI_I_IO";
102 case POWER_DOMAIN_PORT_DSI:
104 case POWER_DOMAIN_PORT_CRT:
106 case POWER_DOMAIN_PORT_OTHER:
108 case POWER_DOMAIN_VGA:
110 case POWER_DOMAIN_AUDIO:
112 case POWER_DOMAIN_AUX_A:
114 case POWER_DOMAIN_AUX_B:
116 case POWER_DOMAIN_AUX_C:
118 case POWER_DOMAIN_AUX_D:
120 case POWER_DOMAIN_AUX_E:
122 case POWER_DOMAIN_AUX_F:
124 case POWER_DOMAIN_AUX_G:
126 case POWER_DOMAIN_AUX_H:
128 case POWER_DOMAIN_AUX_I:
130 case POWER_DOMAIN_AUX_IO_A:
132 case POWER_DOMAIN_AUX_C_TBT:
134 case POWER_DOMAIN_AUX_D_TBT:
136 case POWER_DOMAIN_AUX_E_TBT:
138 case POWER_DOMAIN_AUX_F_TBT:
140 case POWER_DOMAIN_AUX_G_TBT:
142 case POWER_DOMAIN_AUX_H_TBT:
144 case POWER_DOMAIN_AUX_I_TBT:
146 case POWER_DOMAIN_GMBUS:
148 case POWER_DOMAIN_INIT:
150 case POWER_DOMAIN_MODESET:
152 case POWER_DOMAIN_GT_IRQ:
154 case POWER_DOMAIN_DPLL_DC_OFF:
155 return "DPLL_DC_OFF";
156 case POWER_DOMAIN_TC_COLD_OFF:
157 return "TC_COLD_OFF";
159 MISSING_CASE(domain);
164 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
165 struct i915_power_well *power_well)
167 drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
168 power_well->desc->ops->enable(dev_priv, power_well);
169 power_well->hw_enabled = true;
172 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
173 struct i915_power_well *power_well)
175 drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
176 power_well->hw_enabled = false;
177 power_well->desc->ops->disable(dev_priv, power_well);
180 static void intel_power_well_get(struct drm_i915_private *dev_priv,
181 struct i915_power_well *power_well)
183 if (!power_well->count++)
184 intel_power_well_enable(dev_priv, power_well);
187 static void intel_power_well_put(struct drm_i915_private *dev_priv,
188 struct i915_power_well *power_well)
190 drm_WARN(&dev_priv->drm, !power_well->count,
191 "Use count on power well %s is already zero",
192 power_well->desc->name);
194 if (!--power_well->count)
195 intel_power_well_disable(dev_priv, power_well);
199 * __intel_display_power_is_enabled - unlocked check for a power domain
200 * @dev_priv: i915 device instance
201 * @domain: power domain to check
203 * This is the unlocked version of intel_display_power_is_enabled() and should
204 * only be used from error capture and recovery code where deadlocks are
208 * True when the power domain is enabled, false otherwise.
210 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
211 enum intel_display_power_domain domain)
213 struct i915_power_well *power_well;
216 if (dev_priv->runtime_pm.suspended)
221 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
222 if (power_well->desc->always_on)
225 if (!power_well->hw_enabled) {
235 * intel_display_power_is_enabled - check for a power domain
236 * @dev_priv: i915 device instance
237 * @domain: power domain to check
239 * This function can be used to check the hw power domain state. It is mostly
240 * used in hardware state readout functions. Everywhere else code should rely
241 * upon explicit power domain reference counting to ensure that the hardware
242 * block is powered up before accessing it.
244 * Callers must hold the relevant modesetting locks to ensure that concurrent
245 * threads can't disable the power well while the caller tries to read a few
249 * True when the power domain is enabled, false otherwise.
251 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
252 enum intel_display_power_domain domain)
254 struct i915_power_domains *power_domains;
257 power_domains = &dev_priv->power_domains;
259 mutex_lock(&power_domains->lock);
260 ret = __intel_display_power_is_enabled(dev_priv, domain);
261 mutex_unlock(&power_domains->lock);
267 * Starting with Haswell, we have a "Power Down Well" that can be turned off
268 * when not needed anymore. We have 4 registers that can request the power well
269 * to be enabled, and it will only be disabled if none of the registers is
270 * requesting it to be enabled.
272 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
273 u8 irq_pipe_mask, bool has_vga)
276 intel_vga_reset_io_mem(dev_priv);
279 gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
282 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
286 gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
289 #define ICL_AUX_PW_TO_CH(pw_idx) \
290 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
292 #define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
293 ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
295 static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
296 struct i915_power_well *power_well)
298 int pw_idx = power_well->desc->hsw.idx;
300 return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
301 ICL_AUX_PW_TO_CH(pw_idx);
304 static struct intel_digital_port *
305 aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
308 struct intel_digital_port *dig_port = NULL;
309 struct intel_encoder *encoder;
311 for_each_intel_encoder(&dev_priv->drm, encoder) {
312 /* We'll check the MST primary port */
313 if (encoder->type == INTEL_OUTPUT_DP_MST)
316 dig_port = enc_to_dig_port(encoder);
320 if (dig_port->aux_ch != aux_ch) {
331 static bool tc_phy_aux_timeout_expected(struct drm_i915_private *dev_priv,
332 struct i915_power_well *power_well)
334 /* An AUX timeout is expected if the TBT DP tunnel is down. */
335 if (power_well->desc->hsw.is_tc_tbt)
339 * An AUX timeout is expected because we enable TC legacy port aux
340 * to hold port out of TC cold
342 if (INTEL_GEN(dev_priv) == 11 &&
343 power_well->desc->ops == &icl_tc_phy_aux_power_well_ops) {
344 enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
345 struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
347 return dig_port->tc_legacy_port;
353 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
354 struct i915_power_well *power_well)
356 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
357 int pw_idx = power_well->desc->hsw.idx;
359 /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
360 if (intel_de_wait_for_set(dev_priv, regs->driver,
361 HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
362 drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
363 power_well->desc->name);
365 drm_WARN_ON(&dev_priv->drm,
366 !tc_phy_aux_timeout_expected(dev_priv, power_well));
371 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
372 const struct i915_power_well_regs *regs,
375 u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
378 ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
379 ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
381 ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
382 ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
387 static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
388 struct i915_power_well *power_well)
390 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
391 int pw_idx = power_well->desc->hsw.idx;
396 * Bspec doesn't require waiting for PWs to get disabled, but still do
397 * this for paranoia. The known cases where a PW will be forced on:
398 * - a KVMR request on any power well via the KVMR request register
399 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
400 * DEBUG request registers
401 * Skip the wait in case any of the request bits are set and print a
402 * diagnostic message.
404 wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
405 HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
406 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
410 drm_dbg_kms(&dev_priv->drm,
411 "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
412 power_well->desc->name,
413 !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
416 static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
417 enum skl_power_gate pg)
419 /* Timeout 5us for PG#0, for other PGs 1us */
420 drm_WARN_ON(&dev_priv->drm,
421 intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
422 SKL_FUSE_PG_DIST_STATUS(pg), 1));
425 static void hsw_power_well_enable_prepare(struct drm_i915_private *dev_priv,
426 struct i915_power_well *power_well)
428 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
429 int pw_idx = power_well->desc->hsw.idx;
432 if (power_well->desc->hsw.has_fuses) {
433 enum skl_power_gate pg;
435 pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
436 SKL_PW_CTL_IDX_TO_PG(pw_idx);
438 * For PW1 we have to wait both for the PW0/PG0 fuse state
439 * before enabling the power well and PW1/PG1's own fuse
440 * state after the enabling. For all other power wells with
441 * fuses we only have to wait for that PW/PG's fuse state
442 * after the enabling.
445 gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
448 val = intel_de_read(dev_priv, regs->driver);
449 intel_de_write(dev_priv, regs->driver,
450 val | HSW_PWR_WELL_CTL_REQ(pw_idx));
453 static void hsw_power_well_enable_complete(struct drm_i915_private *dev_priv,
454 struct i915_power_well *power_well)
456 int pw_idx = power_well->desc->hsw.idx;
458 hsw_wait_for_power_well_enable(dev_priv, power_well);
460 /* Display WA #1178: cnl */
461 if (IS_CANNONLAKE(dev_priv) &&
462 pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
463 pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
466 val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
467 val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
468 intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
471 if (power_well->desc->hsw.has_fuses) {
472 enum skl_power_gate pg;
474 pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
475 SKL_PW_CTL_IDX_TO_PG(pw_idx);
476 gen9_wait_for_power_well_fuses(dev_priv, pg);
479 hsw_power_well_post_enable(dev_priv,
480 power_well->desc->hsw.irq_pipe_mask,
481 power_well->desc->hsw.has_vga);
/* Full HSW+ power well enable: request, then wait/complete. */
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_power_well_enable_prepare(dev_priv, power_well);
	hsw_power_well_enable_complete(dev_priv, power_well);
}
491 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
492 struct i915_power_well *power_well)
494 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
495 int pw_idx = power_well->desc->hsw.idx;
498 hsw_power_well_pre_disable(dev_priv,
499 power_well->desc->hsw.irq_pipe_mask);
501 val = intel_de_read(dev_priv, regs->driver);
502 intel_de_write(dev_priv, regs->driver,
503 val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
504 hsw_wait_for_power_well_disable(dev_priv, power_well);
507 #define ICL_AUX_PW_TO_PHY(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
510 icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
511 struct i915_power_well *power_well)
513 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
514 int pw_idx = power_well->desc->hsw.idx;
515 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
518 drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
520 val = intel_de_read(dev_priv, regs->driver);
521 intel_de_write(dev_priv, regs->driver,
522 val | HSW_PWR_WELL_CTL_REQ(pw_idx));
524 if (INTEL_GEN(dev_priv) < 12) {
525 val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
526 intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
527 val | ICL_LANE_ENABLE_AUX);
530 hsw_wait_for_power_well_enable(dev_priv, power_well);
532 /* Display WA #1178: icl */
533 if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
534 !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
535 val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
536 val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
537 intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
542 icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
543 struct i915_power_well *power_well)
545 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
546 int pw_idx = power_well->desc->hsw.idx;
547 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
550 drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
552 val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
553 intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
554 val & ~ICL_LANE_ENABLE_AUX);
556 val = intel_de_read(dev_priv, regs->driver);
557 intel_de_write(dev_priv, regs->driver,
558 val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
560 hsw_wait_for_power_well_disable(dev_priv, power_well);
563 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
565 static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
567 static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
568 struct i915_power_well *power_well)
570 int refs = hweight64(power_well->desc->domains &
571 async_put_domains_mask(&dev_priv->power_domains));
573 drm_WARN_ON(&dev_priv->drm, refs > power_well->count);
578 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
579 struct i915_power_well *power_well,
580 struct intel_digital_port *dig_port)
582 /* Bypass the check if all references are released asynchronously */
583 if (power_well_async_ref_count(dev_priv, power_well) ==
587 if (drm_WARN_ON(&dev_priv->drm, !dig_port))
590 if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port)
593 drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
/* No-op stub when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is disabled. */
static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well,
					struct intel_digital_port *dig_port)
{
}
606 #define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
608 static void icl_tc_cold_exit(struct drm_i915_private *i915)
613 ret = sandybridge_pcode_write_timeout(i915,
614 ICL_PCODE_EXIT_TCCOLD,
616 if (ret != -EAGAIN || ++tries == 3)
621 /* Spec states that TC cold exit can take up to 1ms to complete */
625 /* TODO: turn failure into a error as soon i915 CI updates ICL IFWI */
626 drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
631 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
632 struct i915_power_well *power_well)
634 enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
635 struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
638 icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
640 val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
641 val &= ~DP_AUX_CH_CTL_TBT_IO;
642 if (power_well->desc->hsw.is_tc_tbt)
643 val |= DP_AUX_CH_CTL_TBT_IO;
644 intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
646 hsw_power_well_enable_prepare(dev_priv, power_well);
648 if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port)
649 icl_tc_cold_exit(dev_priv);
651 hsw_power_well_enable_complete(dev_priv, power_well);
653 if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) {
654 enum tc_port tc_port;
656 tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
657 intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
658 HIP_INDEX_VAL(tc_port, 0x2));
660 if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
661 DKL_CMN_UC_DW27_UC_HEALTH, 1))
662 drm_warn(&dev_priv->drm,
663 "Timeout waiting TC uC health\n");
668 icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
669 struct i915_power_well *power_well)
671 enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
672 struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
674 icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
676 hsw_power_well_disable(dev_priv, power_well);
680 icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
681 struct i915_power_well *power_well)
683 int pw_idx = power_well->desc->hsw.idx;
684 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); /* non-TBT only */
685 bool is_tbt = power_well->desc->hsw.is_tc_tbt;
687 if (is_tbt || intel_phy_is_tc(dev_priv, phy))
688 return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
689 else if (IS_ICELAKE(dev_priv))
690 return icl_combo_phy_aux_power_well_enable(dev_priv,
693 return hsw_power_well_enable(dev_priv, power_well);
697 icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
698 struct i915_power_well *power_well)
700 int pw_idx = power_well->desc->hsw.idx;
701 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); /* non-TBT only */
702 bool is_tbt = power_well->desc->hsw.is_tc_tbt;
704 if (is_tbt || intel_phy_is_tc(dev_priv, phy))
705 return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
706 else if (IS_ICELAKE(dev_priv))
707 return icl_combo_phy_aux_power_well_disable(dev_priv,
710 return hsw_power_well_disable(dev_priv, power_well);
714 * We should only use the power well if we explicitly asked the hardware to
715 * enable it, so check if it's enabled and also check if we've requested it to
718 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
719 struct i915_power_well *power_well)
721 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
722 enum i915_power_well_id id = power_well->desc->id;
723 int pw_idx = power_well->desc->hsw.idx;
724 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
725 HSW_PWR_WELL_CTL_STATE(pw_idx);
728 val = intel_de_read(dev_priv, regs->driver);
731 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
732 * and the MISC_IO PW will be not restored, so check instead for the
733 * BIOS's own request bits, which are forced-on for these power wells
734 * when exiting DC5/6.
736 if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
737 (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
738 val |= intel_de_read(dev_priv, regs->bios);
740 return (val & mask) == mask;
743 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
745 drm_WARN_ONCE(&dev_priv->drm,
746 (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
747 "DC9 already programmed to be enabled.\n");
748 drm_WARN_ONCE(&dev_priv->drm,
749 intel_de_read(dev_priv, DC_STATE_EN) &
750 DC_STATE_EN_UPTO_DC5,
751 "DC5 still not disabled to enable DC9.\n");
752 drm_WARN_ONCE(&dev_priv->drm,
753 intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
754 HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
755 "Power well 2 on.\n");
756 drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
757 "Interrupts not disabled yet.\n");
760 * TODO: check for the following to verify the conditions to enter DC9
761 * state are satisfied:
762 * 1] Check relevant display engine registers to verify if mode set
763 * disable sequence was followed.
764 * 2] Check if display uninitialize sequence is initialized.
768 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
770 drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
771 "Interrupts not disabled yet.\n");
772 drm_WARN_ONCE(&dev_priv->drm,
773 intel_de_read(dev_priv, DC_STATE_EN) &
774 DC_STATE_EN_UPTO_DC5,
775 "DC5 still not disabled.\n");
778 * TODO: check for the following to verify DC9 state was indeed
779 * entered before programming to disable it:
780 * 1] Check relevant display engine registers to verify if mode
781 * set disable sequence was followed.
782 * 2] Check if display uninitialize sequence is initialized.
786 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
793 intel_de_write(dev_priv, DC_STATE_EN, state);
795 /* It has been observed that disabling the dc6 state sometimes
796 * doesn't stick and dmc keeps returning old value. Make sure
797 * the write really sticks enough times and also force rewrite until
798 * we are confident that state is exactly what we want.
801 v = intel_de_read(dev_priv, DC_STATE_EN);
804 intel_de_write(dev_priv, DC_STATE_EN, state);
807 } else if (rereads++ > 5) {
811 } while (rewrites < 100);
814 drm_err(&dev_priv->drm,
815 "Writing dc state to 0x%x failed, now 0x%x\n",
818 /* Most of the times we need one retry, avoid spam */
820 drm_dbg_kms(&dev_priv->drm,
821 "Rewrote dc state to 0x%x %d times\n",
825 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
829 mask = DC_STATE_EN_UPTO_DC5;
831 if (INTEL_GEN(dev_priv) >= 12)
832 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
834 else if (IS_GEN(dev_priv, 11))
835 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
836 else if (IS_GEN9_LP(dev_priv))
837 mask |= DC_STATE_EN_DC9;
839 mask |= DC_STATE_EN_UPTO_DC6;
844 static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
848 val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
850 drm_dbg_kms(&dev_priv->drm,
851 "Resetting DC state tracking from %02x to %02x\n",
852 dev_priv->csr.dc_state, val);
853 dev_priv->csr.dc_state = val;
857 * gen9_set_dc_state - set target display C power state
858 * @dev_priv: i915 device instance
859 * @state: target DC power state
861 * - DC_STATE_EN_UPTO_DC5
862 * - DC_STATE_EN_UPTO_DC6
865 * Signal to DMC firmware/HW the target DC power state passed in @state.
866 * DMC/HW can turn off individual display clocks and power rails when entering
867 * a deeper DC power state (higher in number) and turns these back when exiting
868 * that state to a shallower power state (lower in number). The HW will decide
869 * when to actually enter a given state on an on-demand basis, for instance
870 * depending on the active state of display pipes. The state of display
871 * registers backed by affected power rails are saved/restored as needed.
873 * Based on the above enabling a deeper DC power state is asynchronous wrt.
874 * enabling it. Disabling a deeper power state is synchronous: for instance
875 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
876 * back on and register state is restored. This is guaranteed by the MMIO write
877 * to DC_STATE_EN blocking until the state is restored.
879 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
884 if (drm_WARN_ON_ONCE(&dev_priv->drm,
885 state & ~dev_priv->csr.allowed_dc_mask))
886 state &= dev_priv->csr.allowed_dc_mask;
888 val = intel_de_read(dev_priv, DC_STATE_EN);
889 mask = gen9_dc_mask(dev_priv);
890 drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
893 /* Check if DMC is ignoring our DC state requests */
894 if ((val & mask) != dev_priv->csr.dc_state)
895 drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
896 dev_priv->csr.dc_state, val & mask);
901 gen9_write_dc_state(dev_priv, val);
903 dev_priv->csr.dc_state = val & mask;
907 sanitize_target_dc_state(struct drm_i915_private *dev_priv,
911 DC_STATE_EN_UPTO_DC6,
912 DC_STATE_EN_UPTO_DC5,
918 for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
919 if (target_dc_state != states[i])
922 if (dev_priv->csr.allowed_dc_mask & target_dc_state)
925 target_dc_state = states[i + 1];
928 return target_dc_state;
931 static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
933 drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
934 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
937 static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
941 drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
942 val = intel_de_read(dev_priv, DC_STATE_EN);
943 val &= ~DC_STATE_DC3CO_STATUS;
944 intel_de_write(dev_priv, DC_STATE_EN, val);
945 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
947 * Delay of 200us DC3CO Exit time B.Spec 49196
949 usleep_range(200, 210);
952 static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
954 assert_can_enable_dc9(dev_priv);
956 drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
958 * Power sequencer reset is not needed on
959 * platforms with South Display Engine on PCH,
960 * because PPS registers are always on.
962 if (!HAS_PCH_SPLIT(dev_priv))
963 intel_power_sequencer_reset(dev_priv);
964 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
967 static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
969 assert_can_disable_dc9(dev_priv);
971 drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
973 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
975 intel_pps_unlock_regs_wa(dev_priv);
978 static void assert_csr_loaded(struct drm_i915_private *dev_priv)
980 drm_WARN_ONCE(&dev_priv->drm,
981 !intel_de_read(dev_priv, CSR_PROGRAM(0)),
982 "CSR program storage start is NULL\n");
983 drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_SSP_BASE),
984 "CSR SSP Base Not fine\n");
985 drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_HTP_SKL),
986 "CSR HTP Not fine\n");
989 static struct i915_power_well *
990 lookup_power_well(struct drm_i915_private *dev_priv,
991 enum i915_power_well_id power_well_id)
993 struct i915_power_well *power_well;
995 for_each_power_well(dev_priv, power_well)
996 if (power_well->desc->id == power_well_id)
1000 * It's not feasible to add error checking code to the callers since
1001 * this condition really shouldn't happen and it doesn't even make sense
1002 * to abort things like display initialization sequences. Just return
1003 * the first power well and hope the WARN gets reported so we can fix
1006 drm_WARN(&dev_priv->drm, 1,
1007 "Power well %d not defined for this platform\n",
1009 return &dev_priv->power_domains.power_wells[0];
1013 * intel_display_power_set_target_dc_state - Set target dc state.
1014 * @dev_priv: i915 device
1015 * @state: state which needs to be set as target_dc_state.
1017 * This function set the "DC off" power well target_dc_state,
1018 * based upon this target_dc_stste, "DC off" power well will
1019 * enable desired DC state.
1021 void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
1024 struct i915_power_well *power_well;
1025 bool dc_off_enabled;
1026 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1028 mutex_lock(&power_domains->lock);
1029 power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
1031 if (drm_WARN_ON(&dev_priv->drm, !power_well))
1034 state = sanitize_target_dc_state(dev_priv, state);
1036 if (state == dev_priv->csr.target_dc_state)
1039 dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
1042 * If DC off power well is disabled, need to enable and disable the
1043 * DC off power well to effect target DC state.
1045 if (!dc_off_enabled)
1046 power_well->desc->ops->enable(dev_priv, power_well);
1048 dev_priv->csr.target_dc_state = state;
1050 if (!dc_off_enabled)
1051 power_well->desc->ops->disable(dev_priv, power_well);
1054 mutex_unlock(&power_domains->lock);
1057 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
1059 enum i915_power_well_id high_pg;
1061 /* Power wells at this level and above must be disabled for DC5 entry */
1062 if (INTEL_GEN(dev_priv) >= 12)
1063 high_pg = ICL_DISP_PW_3;
1065 high_pg = SKL_DISP_PW_2;
1067 drm_WARN_ONCE(&dev_priv->drm,
1068 intel_display_power_well_is_enabled(dev_priv, high_pg),
1069 "Power wells above platform's DC5 limit still enabled.\n");
1071 drm_WARN_ONCE(&dev_priv->drm,
1072 (intel_de_read(dev_priv, DC_STATE_EN) &
1073 DC_STATE_EN_UPTO_DC5),
1074 "DC5 already programmed to be enabled.\n");
1075 assert_rpm_wakelock_held(&dev_priv->runtime_pm);
1077 assert_csr_loaded(dev_priv);
1080 static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
1082 assert_can_enable_dc5(dev_priv);
1084 drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
1086 /* Wa Display #1183: skl,kbl,cfl */
1087 if (IS_GEN9_BC(dev_priv))
1088 intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
1089 intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
1091 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
1094 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
1096 drm_WARN_ONCE(&dev_priv->drm,
1097 intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
1098 "Backlight is not disabled.\n");
1099 drm_WARN_ONCE(&dev_priv->drm,
1100 (intel_de_read(dev_priv, DC_STATE_EN) &
1101 DC_STATE_EN_UPTO_DC6),
1102 "DC6 already programmed to be enabled.\n");
1104 assert_csr_loaded(dev_priv);
1107 static void skl_enable_dc6(struct drm_i915_private *dev_priv)
1109 assert_can_enable_dc6(dev_priv);
1111 drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
1113 /* Wa Display #1183: skl,kbl,cfl */
1114 if (IS_GEN9_BC(dev_priv))
1115 intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
1116 intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
1118 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1121 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
1122 struct i915_power_well *power_well)
1124 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
1125 int pw_idx = power_well->desc->hsw.idx;
1126 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
1127 u32 bios_req = intel_de_read(dev_priv, regs->bios);
1129 /* Take over the request bit if set by BIOS. */
1130 if (bios_req & mask) {
1131 u32 drv_req = intel_de_read(dev_priv, regs->driver);
1133 if (!(drv_req & mask))
1134 intel_de_write(dev_priv, regs->driver, drv_req | mask);
1135 intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
1139 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1140 struct i915_power_well *power_well)
1142 bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
/* Power down the BXT DDI PHY associated with this power well. */
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}
/* Report whether the BXT DDI PHY behind this power well is powered up. */
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}
/*
 * Cross-check the SW refcount of each DDI PHY power well against the
 * PHY's programmed HW state; only wells currently held (count > 0) are
 * verified. GLK has a third common-lane PHY (DPIO_CMN_C) to check.
 */
static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv,
					       GLK_DISP_PW_DPIO_CMN_C);
		if (power_well->count > 0)
			bxt_ddi_phy_verify_state(dev_priv,
						 power_well->desc->bxt.phy);
	}
}
1178 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
1179 struct i915_power_well *power_well)
1181 return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
1182 (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
1185 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
1187 u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
1188 u8 enabled_dbuf_slices = dev_priv->enabled_dbuf_slices_mask;
1190 drm_WARN(&dev_priv->drm,
1191 hw_enabled_dbuf_slices != enabled_dbuf_slices,
1192 "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
1193 hw_enabled_dbuf_slices,
1194 enabled_dbuf_slices);
/*
 * Exit whatever DC state is currently targeted and sanity-check that the
 * HW state (cdclk, DBuf, DDI PHYs, combo PHYs) survived the transition.
 * DC3CO is a shallow state handled separately; the deeper DC5/DC6 exit
 * path re-verifies state the DMC may have clobbered.
 */
static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_config cdclk_config = {};

	if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
		tgl_disable_dc3co(dev_priv);
		return;
	}

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	drm_WARN_ON(&dev_priv->drm,
		    intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
					      &cdclk_config));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);

	if (INTEL_GEN(dev_priv) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHY's HW context for port B is lost after DC transitions,
		 * so we need to restore it manually.
		 */
		intel_combo_phy_init(dev_priv);
}
/* Enabling the "DC off" well means forbidding DC states entirely. */
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc_states(dev_priv);
}
/*
 * Disabling the "DC off" well re-allows the DC state the driver is
 * targeting. Without DMC firmware loaded no DC state can be entered,
 * so bail out early in that case.
 */
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	switch (dev_priv->csr.target_dc_state) {
	case DC_STATE_EN_DC3CO:
		tgl_enable_dc3co(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC6:
		skl_enable_dc6(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC5:
		gen9_enable_dc5(dev_priv);
		break;
	}
}
/* No-op sync_hw hook for power wells with no HW state to take over. */
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}
/* No-op enable/disable hook for the always-on power well. */
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}
/* The always-on power well is, by definition, always enabled. */
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}
/*
 * On i830 both pipes must run for the chip to be happy; turn on any
 * pipe that isn't already enabled.
 */
static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_A);
	if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_B);
}
/* Turn off both i830 pipes (reverse order of enable). */
static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}
1285 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1286 struct i915_power_well *power_well)
1288 return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1289 intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
/* Bring the HW in line with the SW refcount on this power well. */
static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}
/*
 * Request the Punit to power a VLV well on or off and wait (up to 100ms)
 * for the status register to reflect the new state. If the well is
 * already in the requested state, skip the control write entirely.
 */
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = power_well->desc->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
		PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(dev_priv);

	/* True once the status register reports the requested state. */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		drm_err(&dev_priv->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	vlv_punit_put(dev_priv);
}
/* Power-well enable hook: delegate to the Punit-based setter. */
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}
/* Power-well disable hook: delegate to the Punit-based setter. */
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}
/*
 * Read back a VLV power well state from the Punit, warning on any state
 * the driver never programs (only power-on / power-gate are expected)
 * and on a control/status mismatch, which would indicate some other
 * agent is poking at the power controls.
 */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	drm_WARN_ON(&dev_priv->drm, ctrl != state);

	vlv_punit_put(dev_priv);

	return enabled;
}
/*
 * Program the VLV display clock gating, arbiter and rawclk registers to
 * their required initial values.
 */
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = intel_de_read(dev_priv, DSPCLK_GATE_D);
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	intel_de_write(dev_priv, DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	intel_de_write(dev_priv, MI_ARB_VLV,
		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	intel_de_write(dev_priv, CBR1_VLV, 0);

	/* RAWCLK_FREQ_VLV is programmed in kHz; rawclk_freq must be known. */
	drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
	intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
		       DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
					 1000));
}
/*
 * Re-initialize the display side after the VLV/CHV display power well
 * comes up: reference clocks, clock gating, display IRQs and - outside
 * of driver init/resume - HPD, CRT and VGA state.
 */
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = intel_de_read(dev_priv, DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		intel_de_write(dev_priv, DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	intel_vga_redisable_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}
/*
 * Tear down display-side state before the VLV/CHV display power well
 * goes down: disable and drain display IRQs, reset the panel power
 * sequencer and re-arm HPD polling (unless we're in late suspend).
 */
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling on accident in late suspend */
	if (!dev_priv->drm.dev->power.is_suspended)
		intel_hpd_poll_init(dev_priv);
}
/* Power the display well up, then restore the display-side state. */
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
/* Tear down display-side state first, then power the well down. */
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}
/*
 * Power up the VLV DPIO common lane well and de-assert the common lane
 * reset afterwards, per the VLV DPIO programming notes.
 */
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 *    a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *    b. The other bits such as sfr settings / modesel may all
	 *       be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	intel_de_write(dev_priv, DPIO_CTL,
		       intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
}
/*
 * Assert the DPIO common lane reset and power the well down. All PLLs
 * must already be disabled - assert that before touching the reset.
 */
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	intel_de_write(dev_priv, DPIO_CTL,
		       intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}
/* Mask covering every defined display power domain. */
#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

/* True iff all bits in @bits are set in @val. */
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
/*
 * Verify that the CHV PHY power state reported in DISPLAY_PHY_STATUS
 * matches what the driver expects based on its cached chv_phy_control
 * value: compute the expected CL1/CL2/spline LDO and powergood bits for
 * both PHYs and poll the status register (the PHY may be busy with
 * initial calibration, so the state can take a moment to settle).
 */
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
				       phy_status_mask, phy_status, 10))
		drm_err(&dev_priv->drm,
			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
			phy_status, dev_priv->chv_phy_control);
}
/*
 * Power up a CHV DPIO common lane well (PHY0 for ports B/C, PHY1 for
 * port D), wait for phypwrgood, enable dynamic power down in the PHY and
 * finally de-assert the common lane reset via DISPLAY_PHY_CONTROL.
 */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	u32 tmp;

	drm_WARN_ON_ONCE(&dev_priv->drm,
			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	/* Map the power well to its PHY and the pipe used for DPIO access. */
	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
				  PHY_POWERGOOD(phy), 1))
		drm_err(&dev_priv->drm, "Display PHY %d is not power up\n",
			phy);

	vlv_dpio_get(dev_priv);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	vlv_dpio_put(dev_priv);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	drm_dbg_kms(&dev_priv->drm,
		    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}
/*
 * Assert the CHV common lane reset and power the DPIO well down. The
 * PLLs driven from this PHY must already be disabled. Once the well has
 * been disabled at least once the PHY state asserts become reliable.
 */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	drm_WARN_ON_ONCE(&dev_priv->drm,
			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}
/*
 * Verify that the per-lane power down status in the PHY's common lane
 * register matches what the requested override/mask implies: with no
 * override (or a full 0xf mask) all lanes must report powered down;
 * with a partial mask at least one lane must.
 */
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY is some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	vlv_dpio_get(dev_priv);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	vlv_dpio_put(dev_priv);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	drm_WARN(&dev_priv->drm, actual != expected,
		 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
		 !!(actual & DPIO_ALLDL_POWERDOWN),
		 !!(actual & DPIO_ANYDL_POWERDOWN),
		 !!(expected & DPIO_ALLDL_POWERDOWN),
		 !!(expected & DPIO_ANYDL_POWERDOWN),
		 reg, val);
}
/*
 * Enable or disable the power-down override for a whole PHY channel.
 * Returns the previous override state so the caller can restore it.
 * Serialized by the power domains lock as chv_phy_control is shared.
 */
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	/* Nothing to do if the override already matches the request. */
	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	drm_dbg_kms(&dev_priv->drm,
		    "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		    phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}
/*
 * Program the per-lane power-down override mask for the encoder's PHY
 * channel and verify the resulting PHY status, under the power domains
 * lock (chv_phy_control is shared state).
 */
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(encoder));
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder));

	mutex_lock(&power_domains->lock);

	/* Replace the old lane mask with the requested one. */
	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	drm_dbg_kms(&dev_priv->drm,
		    "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		    phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}
/*
 * Read back the CHV pipe power well state from the Punit (always pipe A
 * for this well), warning on states the driver never programs and on a
 * control/status mismatch.
 */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
		    state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
	drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);

	vlv_punit_put(dev_priv);

	return enabled;
}
/*
 * Request the Punit to power the CHV pipe A well on or off and wait
 * (up to 100ms) for the status bits to reflect the new state; skip the
 * control write if the well is already in the requested state.
 */
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	vlv_punit_get(dev_priv);

	/* True once the status bits report the requested state. */
#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);

	if (wait_for(COND, 100))
		drm_err(&dev_priv->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));

#undef COND

out:
	vlv_punit_put(dev_priv);
}
/* Re-program DISPLAY_PHY_CONTROL from the driver's cached value. */
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);
}
/* Power the pipe well up, then restore the display-side state. */
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}
/* Tear down the display-side state first, then power the well down. */
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}
/* Union of both pending async-put domain masks (no disjointness check). */
static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
{
	return power_domains->async_put_domains[0] |
	       power_domains->async_put_domains[1];
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

/* The two async-put masks must never both contain the same domain. */
static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
	return !WARN_ON(power_domains->async_put_domains[0] &
			power_domains->async_put_domains[1]);
}

/*
 * Check the invariants of the async-put machinery: disjoint masks, a
 * wakeref held iff any domain is pending, and exactly one reference
 * outstanding on each pending domain.
 */
static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
	enum intel_display_power_domain domain;
	bool err = false;

	err |= !assert_async_put_domain_masks_disjoint(power_domains);
	err |= WARN_ON(!!power_domains->async_put_wakeref !=
		       !!__async_put_domains_mask(power_domains));

	for_each_power_domain(domain, __async_put_domains_mask(power_domains))
		err |= WARN_ON(power_domains->domain_use_count[domain] != 1);

	return !err;
}

/* Dump the use count of every domain in @mask, for debugging. */
static void print_power_domains(struct i915_power_domains *power_domains,
				const char *prefix, u64 mask)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);
	enum intel_display_power_domain domain;

	drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask));
	for_each_power_domain(domain, mask)
		drm_dbg(&i915->drm, "%s use_count %d\n",
			intel_display_power_domain_str(domain),
			power_domains->domain_use_count[domain]);
}

/* Dump the full async-put state (wakeref + both masks). */
static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);

	drm_dbg(&i915->drm, "async_put_wakeref %u\n",
		power_domains->async_put_wakeref);

	print_power_domains(power_domains, "async_put_domains[0]",
			    power_domains->async_put_domains[0]);
	print_power_domains(power_domains, "async_put_domains[1]",
			    power_domains->async_put_domains[1]);
}

/* Verify the async-put invariants, dumping the state on failure. */
static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
	if (!__async_put_domains_state_ok(power_domains))
		print_async_put_domains_state(power_domains);
}

#else

/* Non-debug stubs: no checking, report the masks as always disjoint. */
static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
	return true;
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
}

#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
/* Checked accessor: assert mask disjointness, then return the union. */
static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	return __async_put_domains_mask(power_domains);
}
/* Remove @domain from both pending async-put masks. */
static void
async_put_domains_clear_domain(struct i915_power_domains *power_domains,
			       enum intel_display_power_domain domain)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
	power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
}
/*
 * If @domain has a pending async put, take over that reference instead
 * of acquiring a new one. When this was the last pending domain, also
 * cancel the async work and drop its raw wakeref. Returns true if the
 * pending reference was grabbed. Called with the power domains lock held.
 */
static bool
intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool ret = false;

	if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
		goto out_verify;

	async_put_domains_clear_domain(power_domains, domain);

	ret = true;

	if (async_put_domains_mask(power_domains))
		goto out_verify;

	cancel_delayed_work(&power_domains->async_put_work);
	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
				 fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
	verify_async_put_domains_state(power_domains);

	return ret;
}
/*
 * Take a reference on @domain: reuse a pending async-put reference if
 * one exists, otherwise enable every power well providing the domain and
 * bump the domain use count. Called with the power domains lock held.
 */
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
		return;

	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}
/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 *
 * Returns: the runtime PM wakeref to pass back to intel_display_power_put().
 */
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	mutex_lock(&power_domains->lock);
	__intel_display_power_get_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return wakeref;
}
/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 *
 * Returns: a wakeref if the domain was already enabled, 0 (false) otherwise.
 */
intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
				   enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	intel_wakeref_t wakeref;
	bool is_enabled;

	/* Only proceed if the device is already runtime-active. */
	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
	if (!wakeref)
		return false;

	mutex_lock(&power_domains->lock);

	if (__intel_display_power_is_enabled(dev_priv, domain)) {
		__intel_display_power_get_domain(dev_priv, domain);
		is_enabled = true;
	} else {
		is_enabled = false;
	}

	mutex_unlock(&power_domains->lock);

	if (!is_enabled) {
		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
		wakeref = 0;
	}

	return wakeref;
}
/*
 * Drop a reference on @domain: decrement the use count and disable the
 * providing power wells in reverse order. Warns on an already-zero use
 * count and on dropping a domain with an async put pending. Called with
 * the power domains lock held.
 */
static void
__intel_display_power_put_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	const char *name = intel_display_power_domain_str(domain);

	power_domains = &dev_priv->power_domains;

	drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
		 "Use count on domain %s is already zero\n",
		 name);
	drm_WARN(&dev_priv->drm,
		 async_put_domains_mask(power_domains) & BIT_ULL(domain),
		 "Async disabling of domain %s is pending\n",
		 name);

	power_domains->domain_use_count[domain]--;

	for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_put(dev_priv, power_well);
}
/* Locked wrapper around __intel_display_power_put_domain(). */
static void __intel_display_power_put(struct drm_i915_private *dev_priv,
				      enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	__intel_display_power_put_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);
}
/**
 * intel_display_power_put_unchecked - release an unchecked power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * This function exists only for historical reasons and should be avoided in
 * new code, as the correctness of its use cannot be checked. Always use
 * intel_display_power_put() instead.
 */
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}
/*
 * Stash @wakeref for the async-put worker and schedule it with a 100ms
 * grace period. There must not be a wakeref already pending, and the
 * work must not already be queued. Called with the power domains lock
 * held.
 */
static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
			     intel_wakeref_t wakeref)
{
	WARN_ON(power_domains->async_put_wakeref);
	power_domains->async_put_wakeref = wakeref;
	WARN_ON(!queue_delayed_work(system_unbound_wq,
				    &power_domains->async_put_work,
				    msecs_to_jiffies(100)));
}
/*
 * Drop the pending async-put references for every domain in @mask.
 * Called with the power domains lock held and a raw wakeref held.
 */
static void
release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
{
	struct drm_i915_private *dev_priv =
		container_of(power_domains, struct drm_i915_private,
			     power_domains);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	/*
	 * The caller must hold already raw wakeref, upgrade that to a proper
	 * wakeref to make the state checker happy about the HW access during
	 * power well disabling.
	 */
	assert_rpm_raw_wakeref_held(rpm);
	wakeref = intel_runtime_pm_get(rpm);

	for_each_power_domain(domain, mask) {
		/* Clear before put, so put's sanity check is happy. */
		async_put_domains_clear_domain(power_domains, domain);
		__intel_display_power_put_domain(dev_priv, domain);
	}

	intel_runtime_pm_put(rpm, wakeref);
}
/*
 * Delayed worker that powers down the domains queued by
 * __intel_display_power_put_async() once the grace period has expired.
 */
static void
intel_display_power_put_async_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     power_domains.async_put_work.work);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	/* Raw wakeref to hand over in case the work needs to requeue itself. */
	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
	intel_wakeref_t old_work_wakeref = 0;

	mutex_lock(&power_domains->lock);

	/*
	 * Bail out if all the domain refs pending to be released were grabbed
	 * by subsequent gets or a flush_work.
	 */
	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!old_work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  power_domains->async_put_domains[0]);

	/* Requeue the work if more domains were async put meanwhile. */
	if (power_domains->async_put_domains[1]) {
		power_domains->async_put_domains[0] =
			fetch_and_zero(&power_domains->async_put_domains[1]);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&new_work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	/* Drop whichever wakerefs were not consumed above. */
	if (old_work_wakeref)
		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
	if (new_work_wakeref)
		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
}
/**
 * __intel_display_power_put_async - release a power domain reference asynchronously
 * @i915: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get*() and schedules a work to power down the
 * corresponding hardware block if this is the last reference.
 */
void __intel_display_power_put_async(struct drm_i915_private *i915,
				     enum intel_display_power_domain domain,
				     intel_wakeref_t wakeref)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	/* Raw wakeref handed over to the worker if we end up queueing one. */
	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);

	mutex_lock(&power_domains->lock);

	/* Not the last reference: drop it synchronously, nothing powers off. */
	if (power_domains->domain_use_count[domain] > 1) {
		__intel_display_power_put_domain(i915, domain);

		goto out_verify;
	}

	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);

	/* Let a pending work requeue itself or queue a new one. */
	if (power_domains->async_put_wakeref) {
		power_domains->async_put_domains[1] |= BIT_ULL(domain);
	} else {
		power_domains->async_put_domains[0] |= BIT_ULL(domain);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	/* Drop the raw wakeref unless it was handed over to the worker. */
	if (work_wakeref)
		intel_runtime_pm_put_raw(rpm, work_wakeref);

	intel_runtime_pm_put(rpm, wakeref);
}
/**
 * intel_display_power_flush_work - flushes the async display power disabling work
 * @i915: i915 device instance
 *
 * Flushes any pending work that was scheduled by a preceding
 * intel_display_power_put_async() call, completing the disabling of the
 * corresponding power domains.
 *
 * Note that the work handler function may still be running after this
 * function returns; to ensure that the work handler isn't running use
 * intel_display_power_flush_work_sync() instead.
 */
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	intel_wakeref_t work_wakeref;

	mutex_lock(&power_domains->lock);

	/* Steal the worker's wakeref; if it's gone there is nothing pending. */
	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  async_put_domains_mask(power_domains));
	cancel_delayed_work(&power_domains->async_put_work);

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
}
/**
 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
 * @i915: i915 device instance
 *
 * Like intel_display_power_flush_work(), but also ensure that the work
 * handler function is not running any more when this function returns.
 */
static void
intel_display_power_flush_work_sync(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	intel_display_power_flush_work(i915);
	/* Synchronous cancel: guarantees the handler has finished running. */
	cancel_delayed_work_sync(&power_domains->async_put_work);

	verify_async_put_domains_state(power_domains);

	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain,
			     intel_wakeref_t wakeref)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
#endif
/* Domains served by the single i830 "pipes" power well. */
#define I830_PIPES_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Everything behind the VLV DISP2D (display) power well. */
#define VLV_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

/* Ports/AUX behind the VLV BC common-lane (DPIO) well. */
#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

/* Per TX-lane-pair wells; lane pairs on the same PHY share domains. */
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Everything behind the CHV display power well (the old DISP2D well). */
#define CHV_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

/* CHV splits the common lanes into a BC PHY and a separate D PHY. */
#define CHV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Everything behind the single HSW global display power well. */
#define HSW_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

/* BDW: like HSW, but pipe A's panel fitter moved out of the well. */
#define BDW_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* SKL power well 2 (PW_2): pipes B/C, most transcoders, ports and audio. */
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* DDI IO wells: the DDI A/E pair shares a single well on SKL. */
#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Domains that block DC states (DC_OFF "well" must be on to use them). */
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* BXT power well 2: like SKL PW_2 but only ports B/C exist. */
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Domains that block DC states; GMBUS needs DC off on BXT. */
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Per-PHY common lane wells. */
#define BXT_DPIO_CMN_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* GLK power well 2: same layout as BXT PW_2. */
#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* GLK gains per-DDI IO wells (no INIT domain for these). */
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
/* Per-PHY common lane wells. */
#define GLK_DPIO_CMN_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Per-port AUX wells; AUX A also covers the AUX IO domain. */
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Domains that block DC states on GLK. */
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* CNL power well 2: adds port F relative to SKL, drops port E. */
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_AUX_F) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Per-DDI IO wells. */
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Per-port AUX wells; AUX A also covers the AUX IO domain. */
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_F) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
/* Domains that block DC states on CNL. */
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/*
 * ICL power well / power gate layout (nested: PW_2 contains PW_3 etc.):
 *
 * ICL PW_0/PG_0 domains (HW/DMC control):
 * - clocks except port PLL
 * - central power except FBC
 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
 * ICL PW_1/PG_1 domains (HW/DMC control):
 * - PIPE_A and its planes, except VGA
 * - transcoder EDP + PSR
 */
#define ICL_PW_4_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

/* PW_3 contains PW_4: pipe B, transcoders, all port lanes and AUX. */
#define ICL_PW_3_POWER_DOMAINS (			\
	ICL_PW_4_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C_TBT) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |		\
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |		\
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/*
 * PW_2 adds the VDSC/joining block over PW_3;
 * - KVMR (HW control)
 */
#define ICL_PW_2_POWER_DOMAINS (			\
	ICL_PW_3_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

/*
 * Domains that block DC states (DMC controlled);
 * - KVMR (HW control)
 */
#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	ICL_PW_2_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

/* Per-DDI IO wells. */
#define ICL_DDI_IO_A_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define ICL_DDI_IO_B_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define ICL_DDI_IO_C_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define ICL_DDI_IO_D_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
#define ICL_DDI_IO_E_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
#define ICL_DDI_IO_F_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))

/* Per-port AUX wells: combo PHY A/B, then TC (legacy) and TBT variants. */
#define ICL_AUX_A_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A))
#define ICL_AUX_B_IO_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_AUX_B))
#define ICL_AUX_C_TC1_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C))
#define ICL_AUX_D_TC2_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_D))
#define ICL_AUX_E_TC3_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_E))
#define ICL_AUX_F_TC4_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_F))
#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
/* TGL adds pipe D; PW_5 is the innermost well (nested in PW_4/PW_3/PW_2). */
#define TGL_PW_5_POWER_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_D) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_4_POWER_DOMAINS (			\
	TGL_PW_5_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

/* PW_3: pipe B plus all six Type-C port lane/AUX domains. */
#define TGL_PW_3_POWER_DOMAINS (			\
	TGL_PW_4_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |			\
	BIT_ULL(POWER_DOMAIN_AUX_E) |			\
	BIT_ULL(POWER_DOMAIN_AUX_F) |			\
	BIT_ULL(POWER_DOMAIN_AUX_G) |			\
	BIT_ULL(POWER_DOMAIN_AUX_H) |			\
	BIT_ULL(POWER_DOMAIN_AUX_I) |			\
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |		\
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |		\
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |		\
	BIT_ULL(POWER_DOMAIN_AUX_G_TBT) |		\
	BIT_ULL(POWER_DOMAIN_AUX_H_TBT) |		\
	BIT_ULL(POWER_DOMAIN_AUX_I_TBT) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_2_POWER_DOMAINS (			\
	TGL_PW_3_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

/* Note: based on PW_3, not PW_2 — VDSC does not block DC states. */
#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS (		\
	TGL_PW_3_POWER_DOMAINS |			\
	BIT_ULL(POWER_DOMAIN_MODESET) |			\
	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
	BIT_ULL(POWER_DOMAIN_AUX_B) |			\
	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
	BIT_ULL(POWER_DOMAIN_INIT))

/* Per-DDI IO wells for the six Type-C ports. */
#define TGL_DDI_IO_D_TC1_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
#define TGL_DDI_IO_E_TC2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
#define TGL_DDI_IO_F_TC3_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
#define TGL_DDI_IO_G_TC4_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
#define TGL_DDI_IO_H_TC5_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
#define TGL_DDI_IO_I_TC6_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))

/* Per-port AUX wells: combo PHYs A-C, then TC (legacy) and TBT variants. */
#define TGL_AUX_A_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A))
#define TGL_AUX_B_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B))
#define TGL_AUX_C_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C))
#define TGL_AUX_D_TC1_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D))
#define TGL_AUX_E_TC2_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_E))
#define TGL_AUX_F_TC3_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F))
#define TGL_AUX_G_TC4_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_G))
#define TGL_AUX_H_TC5_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_H))
#define TGL_AUX_I_TC6_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_I))
#define TGL_AUX_D_TBT1_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
#define TGL_AUX_E_TBT2_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
#define TGL_AUX_F_TBT3_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
#define TGL_AUX_G_TBT4_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
#define TGL_AUX_H_TBT5_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_I_TBT))

/* Domains that must block TC-cold: all Type-C AUX, legacy and TBT. */
#define TGL_TC_COLD_OFF_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_D)	|	\
	BIT_ULL(POWER_DOMAIN_AUX_E)	|	\
	BIT_ULL(POWER_DOMAIN_AUX_F)	|	\
	BIT_ULL(POWER_DOMAIN_AUX_G)	|	\
	BIT_ULL(POWER_DOMAIN_AUX_H)	|	\
	BIT_ULL(POWER_DOMAIN_AUX_I)	|	\
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT)	|	\
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT)	|	\
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT)	|	\
	BIT_ULL(POWER_DOMAIN_AUX_G_TBT)	|	\
	BIT_ULL(POWER_DOMAIN_AUX_H_TBT)	|	\
	BIT_ULL(POWER_DOMAIN_AUX_I_TBT)	|	\
	BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
/* Ops for wells that are always on: every callback is a no-op. */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};
/* Ops for the CHV per-pipe (DISP2D) power well. */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};
/* Ops for the CHV DPIO common-lane wells (state queried via Punit). */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
/* Platforms without real display power wells get this single dummy well. */
static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};
/* Ops for the i830 "pipes" well (both pipes toggled together). */
static const struct i915_power_well_ops i830_pipes_power_well_ops = {
	.sync_hw = i830_pipes_power_well_sync_hw,
	.enable = i830_pipes_power_well_enable,
	.disable = i830_pipes_power_well_disable,
	.is_enabled = i830_pipes_power_well_enabled,
};
/* i830 power wells, in the order they should be enabled. */
static const struct i915_power_well_desc i830_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "pipes",
		.domains = I830_PIPES_POWER_DOMAINS,
		.ops = &i830_pipes_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};
/* Generic ops for HSW+ style wells driven through the PWR_WELL_CTL regs. */
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
/* Ops for the virtual "DC off" well: enabling it disallows DC states. */
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = gen9_dc_off_power_well_enable,
	.disable = gen9_dc_off_power_well_disable,
	.is_enabled = gen9_dc_off_power_well_enabled,
};
/* Ops for the BXT per-PHY common-lane wells. */
static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = bxt_dpio_cmn_power_well_enable,
	.disable = bxt_dpio_cmn_power_well_disable,
	.is_enabled = bxt_dpio_cmn_power_well_enabled,
};
/* The four request/state register copies: BIOS, driver, KVMR and debug. */
static const struct i915_power_well_regs hsw_power_well_regs = {
	.bios	= HSW_PWR_WELL_CTL1,
	.driver	= HSW_PWR_WELL_CTL2,
	.kvmr	= HSW_PWR_WELL_CTL3,
	.debug	= HSW_PWR_WELL_CTL4,
};
/* HSW power wells, in the order they should be enabled. */
static const struct i915_power_well_desc hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.has_vga = true,
		},
	},
};
/* BDW power wells; the global well also gates pipe B/C interrupts. */
static const struct i915_power_well_desc bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = HSW_DISP_PW_GLOBAL,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
			.hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
			.hsw.has_vga = true,
		},
	},
};
/* Ops for the VLV DISP2D well (Punit controlled). */
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
/* Ops for the VLV DPIO common-lane well. */
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_dpio_cmn_power_well_enable,
	.disable = vlv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
/* Generic ops for the plain VLV Punit-controlled wells (TX lanes). */
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
/* VLV power wells, in the order they should be enabled. */
static const struct i915_power_well_desc vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.ops = &vlv_display_power_well_ops,
		.id = VLV_DISP_PW_DISP2D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DISP2D,
		},
	},
	/*
	 * The four TX lane wells are enabled/disabled together, so each
	 * carries the union of all B/C lane domains.
	 */
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
		},
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
		},
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
		},
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
		},
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &vlv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
};
/* CHV power wells, in the order they should be enabled. */
static const struct i915_power_well_desc chv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "display",
		/*
		 * Pipe A power well is the new disp2d well. Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.ops = &chv_pipe_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = CHV_DISP_PW_DPIO_CMN_D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
		},
	},
};
3201 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
3202 enum i915_power_well_id power_well_id)
3204 struct i915_power_well *power_well;
3207 power_well = lookup_power_well(dev_priv, power_well_id);
3208 ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
3213 static const struct i915_power_well_desc skl_power_wells[] = {
3215 .name = "always-on",
3217 .domains = POWER_DOMAIN_MASK,
3218 .ops = &i9xx_always_on_power_well_ops,
3219 .id = DISP_PW_ID_NONE,
3222 .name = "power well 1",
3223 /* Handled by the DMC firmware */
3226 .ops = &hsw_power_well_ops,
3227 .id = SKL_DISP_PW_1,
3229 .hsw.regs = &hsw_power_well_regs,
3230 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3231 .hsw.has_fuses = true,
3235 .name = "MISC IO power well",
3236 /* Handled by the DMC firmware */
3239 .ops = &hsw_power_well_ops,
3240 .id = SKL_DISP_PW_MISC_IO,
3242 .hsw.regs = &hsw_power_well_regs,
3243 .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
3248 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
3249 .ops = &gen9_dc_off_power_well_ops,
3250 .id = SKL_DISP_DC_OFF,
3253 .name = "power well 2",
3254 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3255 .ops = &hsw_power_well_ops,
3256 .id = SKL_DISP_PW_2,
3258 .hsw.regs = &hsw_power_well_regs,
3259 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3260 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3261 .hsw.has_vga = true,
3262 .hsw.has_fuses = true,
3266 .name = "DDI A/E IO power well",
3267 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
3268 .ops = &hsw_power_well_ops,
3269 .id = DISP_PW_ID_NONE,
3271 .hsw.regs = &hsw_power_well_regs,
3272 .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
3276 .name = "DDI B IO power well",
3277 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3278 .ops = &hsw_power_well_ops,
3279 .id = DISP_PW_ID_NONE,
3281 .hsw.regs = &hsw_power_well_regs,
3282 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3286 .name = "DDI C IO power well",
3287 .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3288 .ops = &hsw_power_well_ops,
3289 .id = DISP_PW_ID_NONE,
3291 .hsw.regs = &hsw_power_well_regs,
3292 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3296 .name = "DDI D IO power well",
3297 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
3298 .ops = &hsw_power_well_ops,
3299 .id = DISP_PW_ID_NONE,
3301 .hsw.regs = &hsw_power_well_regs,
3302 .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3307 static const struct i915_power_well_desc bxt_power_wells[] = {
3309 .name = "always-on",
3311 .domains = POWER_DOMAIN_MASK,
3312 .ops = &i9xx_always_on_power_well_ops,
3313 .id = DISP_PW_ID_NONE,
3316 .name = "power well 1",
3317 /* Handled by the DMC firmware */
3320 .ops = &hsw_power_well_ops,
3321 .id = SKL_DISP_PW_1,
3323 .hsw.regs = &hsw_power_well_regs,
3324 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3325 .hsw.has_fuses = true,
3330 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
3331 .ops = &gen9_dc_off_power_well_ops,
3332 .id = SKL_DISP_DC_OFF,
3335 .name = "power well 2",
3336 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3337 .ops = &hsw_power_well_ops,
3338 .id = SKL_DISP_PW_2,
3340 .hsw.regs = &hsw_power_well_regs,
3341 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3342 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3343 .hsw.has_vga = true,
3344 .hsw.has_fuses = true,
3348 .name = "dpio-common-a",
3349 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
3350 .ops = &bxt_dpio_cmn_power_well_ops,
3351 .id = BXT_DISP_PW_DPIO_CMN_A,
3353 .bxt.phy = DPIO_PHY1,
3357 .name = "dpio-common-bc",
3358 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
3359 .ops = &bxt_dpio_cmn_power_well_ops,
3360 .id = VLV_DISP_PW_DPIO_CMN_BC,
3362 .bxt.phy = DPIO_PHY0,
3367 static const struct i915_power_well_desc glk_power_wells[] = {
3369 .name = "always-on",
3371 .domains = POWER_DOMAIN_MASK,
3372 .ops = &i9xx_always_on_power_well_ops,
3373 .id = DISP_PW_ID_NONE,
3376 .name = "power well 1",
3377 /* Handled by the DMC firmware */
3380 .ops = &hsw_power_well_ops,
3381 .id = SKL_DISP_PW_1,
3383 .hsw.regs = &hsw_power_well_regs,
3384 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3385 .hsw.has_fuses = true,
3390 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
3391 .ops = &gen9_dc_off_power_well_ops,
3392 .id = SKL_DISP_DC_OFF,
3395 .name = "power well 2",
3396 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3397 .ops = &hsw_power_well_ops,
3398 .id = SKL_DISP_PW_2,
3400 .hsw.regs = &hsw_power_well_regs,
3401 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3402 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3403 .hsw.has_vga = true,
3404 .hsw.has_fuses = true,
3408 .name = "dpio-common-a",
3409 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
3410 .ops = &bxt_dpio_cmn_power_well_ops,
3411 .id = BXT_DISP_PW_DPIO_CMN_A,
3413 .bxt.phy = DPIO_PHY1,
3417 .name = "dpio-common-b",
3418 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
3419 .ops = &bxt_dpio_cmn_power_well_ops,
3420 .id = VLV_DISP_PW_DPIO_CMN_BC,
3422 .bxt.phy = DPIO_PHY0,
3426 .name = "dpio-common-c",
3427 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
3428 .ops = &bxt_dpio_cmn_power_well_ops,
3429 .id = GLK_DISP_PW_DPIO_CMN_C,
3431 .bxt.phy = DPIO_PHY2,
3436 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
3437 .ops = &hsw_power_well_ops,
3438 .id = DISP_PW_ID_NONE,
3440 .hsw.regs = &hsw_power_well_regs,
3441 .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3446 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
3447 .ops = &hsw_power_well_ops,
3448 .id = DISP_PW_ID_NONE,
3450 .hsw.regs = &hsw_power_well_regs,
3451 .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3456 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
3457 .ops = &hsw_power_well_ops,
3458 .id = DISP_PW_ID_NONE,
3460 .hsw.regs = &hsw_power_well_regs,
3461 .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3465 .name = "DDI A IO power well",
3466 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
3467 .ops = &hsw_power_well_ops,
3468 .id = DISP_PW_ID_NONE,
3470 .hsw.regs = &hsw_power_well_regs,
3471 .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3475 .name = "DDI B IO power well",
3476 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3477 .ops = &hsw_power_well_ops,
3478 .id = DISP_PW_ID_NONE,
3480 .hsw.regs = &hsw_power_well_regs,
3481 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3485 .name = "DDI C IO power well",
3486 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3487 .ops = &hsw_power_well_ops,
3488 .id = DISP_PW_ID_NONE,
3490 .hsw.regs = &hsw_power_well_regs,
3491 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3496 static const struct i915_power_well_desc cnl_power_wells[] = {
3498 .name = "always-on",
3500 .domains = POWER_DOMAIN_MASK,
3501 .ops = &i9xx_always_on_power_well_ops,
3502 .id = DISP_PW_ID_NONE,
3505 .name = "power well 1",
3506 /* Handled by the DMC firmware */
3509 .ops = &hsw_power_well_ops,
3510 .id = SKL_DISP_PW_1,
3512 .hsw.regs = &hsw_power_well_regs,
3513 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3514 .hsw.has_fuses = true,
3519 .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
3520 .ops = &hsw_power_well_ops,
3521 .id = DISP_PW_ID_NONE,
3523 .hsw.regs = &hsw_power_well_regs,
3524 .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3529 .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
3530 .ops = &hsw_power_well_ops,
3531 .id = DISP_PW_ID_NONE,
3533 .hsw.regs = &hsw_power_well_regs,
3534 .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3539 .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
3540 .ops = &hsw_power_well_ops,
3541 .id = DISP_PW_ID_NONE,
3543 .hsw.regs = &hsw_power_well_regs,
3544 .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3549 .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
3550 .ops = &hsw_power_well_ops,
3551 .id = DISP_PW_ID_NONE,
3553 .hsw.regs = &hsw_power_well_regs,
3554 .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
3559 .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
3560 .ops = &gen9_dc_off_power_well_ops,
3561 .id = SKL_DISP_DC_OFF,
3564 .name = "power well 2",
3565 .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3566 .ops = &hsw_power_well_ops,
3567 .id = SKL_DISP_PW_2,
3569 .hsw.regs = &hsw_power_well_regs,
3570 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3571 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3572 .hsw.has_vga = true,
3573 .hsw.has_fuses = true,
3577 .name = "DDI A IO power well",
3578 .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
3579 .ops = &hsw_power_well_ops,
3580 .id = DISP_PW_ID_NONE,
3582 .hsw.regs = &hsw_power_well_regs,
3583 .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3587 .name = "DDI B IO power well",
3588 .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
3589 .ops = &hsw_power_well_ops,
3590 .id = DISP_PW_ID_NONE,
3592 .hsw.regs = &hsw_power_well_regs,
3593 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3597 .name = "DDI C IO power well",
3598 .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
3599 .ops = &hsw_power_well_ops,
3600 .id = DISP_PW_ID_NONE,
3602 .hsw.regs = &hsw_power_well_regs,
3603 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3607 .name = "DDI D IO power well",
3608 .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
3609 .ops = &hsw_power_well_ops,
3610 .id = DISP_PW_ID_NONE,
3612 .hsw.regs = &hsw_power_well_regs,
3613 .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3617 .name = "DDI F IO power well",
3618 .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
3619 .ops = &hsw_power_well_ops,
3620 .id = DISP_PW_ID_NONE,
3622 .hsw.regs = &hsw_power_well_regs,
3623 .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
3628 .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
3629 .ops = &hsw_power_well_ops,
3630 .id = DISP_PW_ID_NONE,
3632 .hsw.regs = &hsw_power_well_regs,
3633 .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
3638 static const struct i915_power_well_ops icl_aux_power_well_ops = {
3639 .sync_hw = hsw_power_well_sync_hw,
3640 .enable = icl_aux_power_well_enable,
3641 .disable = icl_aux_power_well_disable,
3642 .is_enabled = hsw_power_well_enabled,
3645 static const struct i915_power_well_regs icl_aux_power_well_regs = {
3646 .bios = ICL_PWR_WELL_CTL_AUX1,
3647 .driver = ICL_PWR_WELL_CTL_AUX2,
3648 .debug = ICL_PWR_WELL_CTL_AUX4,
3651 static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3652 .bios = ICL_PWR_WELL_CTL_DDI1,
3653 .driver = ICL_PWR_WELL_CTL_DDI2,
3654 .debug = ICL_PWR_WELL_CTL_DDI4,
3657 static const struct i915_power_well_desc icl_power_wells[] = {
3659 .name = "always-on",
3661 .domains = POWER_DOMAIN_MASK,
3662 .ops = &i9xx_always_on_power_well_ops,
3663 .id = DISP_PW_ID_NONE,
3666 .name = "power well 1",
3667 /* Handled by the DMC firmware */
3670 .ops = &hsw_power_well_ops,
3671 .id = SKL_DISP_PW_1,
3673 .hsw.regs = &hsw_power_well_regs,
3674 .hsw.idx = ICL_PW_CTL_IDX_PW_1,
3675 .hsw.has_fuses = true,
3680 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3681 .ops = &gen9_dc_off_power_well_ops,
3682 .id = SKL_DISP_DC_OFF,
3685 .name = "power well 2",
3686 .domains = ICL_PW_2_POWER_DOMAINS,
3687 .ops = &hsw_power_well_ops,
3688 .id = SKL_DISP_PW_2,
3690 .hsw.regs = &hsw_power_well_regs,
3691 .hsw.idx = ICL_PW_CTL_IDX_PW_2,
3692 .hsw.has_fuses = true,
3696 .name = "power well 3",
3697 .domains = ICL_PW_3_POWER_DOMAINS,
3698 .ops = &hsw_power_well_ops,
3699 .id = ICL_DISP_PW_3,
3701 .hsw.regs = &hsw_power_well_regs,
3702 .hsw.idx = ICL_PW_CTL_IDX_PW_3,
3703 .hsw.irq_pipe_mask = BIT(PIPE_B),
3704 .hsw.has_vga = true,
3705 .hsw.has_fuses = true,
3710 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
3711 .ops = &hsw_power_well_ops,
3712 .id = DISP_PW_ID_NONE,
3714 .hsw.regs = &icl_ddi_power_well_regs,
3715 .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3720 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
3721 .ops = &hsw_power_well_ops,
3722 .id = DISP_PW_ID_NONE,
3724 .hsw.regs = &icl_ddi_power_well_regs,
3725 .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3730 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
3731 .ops = &hsw_power_well_ops,
3732 .id = DISP_PW_ID_NONE,
3734 .hsw.regs = &icl_ddi_power_well_regs,
3735 .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3740 .domains = ICL_DDI_IO_D_POWER_DOMAINS,
3741 .ops = &hsw_power_well_ops,
3742 .id = DISP_PW_ID_NONE,
3744 .hsw.regs = &icl_ddi_power_well_regs,
3745 .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3750 .domains = ICL_DDI_IO_E_POWER_DOMAINS,
3751 .ops = &hsw_power_well_ops,
3752 .id = DISP_PW_ID_NONE,
3754 .hsw.regs = &icl_ddi_power_well_regs,
3755 .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3760 .domains = ICL_DDI_IO_F_POWER_DOMAINS,
3761 .ops = &hsw_power_well_ops,
3762 .id = DISP_PW_ID_NONE,
3764 .hsw.regs = &icl_ddi_power_well_regs,
3765 .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
3770 .domains = ICL_AUX_A_IO_POWER_DOMAINS,
3771 .ops = &icl_aux_power_well_ops,
3772 .id = DISP_PW_ID_NONE,
3774 .hsw.regs = &icl_aux_power_well_regs,
3775 .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3780 .domains = ICL_AUX_B_IO_POWER_DOMAINS,
3781 .ops = &icl_aux_power_well_ops,
3782 .id = DISP_PW_ID_NONE,
3784 .hsw.regs = &icl_aux_power_well_regs,
3785 .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3789 .name = "AUX C TC1",
3790 .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
3791 .ops = &icl_aux_power_well_ops,
3792 .id = DISP_PW_ID_NONE,
3794 .hsw.regs = &icl_aux_power_well_regs,
3795 .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3796 .hsw.is_tc_tbt = false,
3800 .name = "AUX D TC2",
3801 .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
3802 .ops = &icl_aux_power_well_ops,
3803 .id = DISP_PW_ID_NONE,
3805 .hsw.regs = &icl_aux_power_well_regs,
3806 .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
3807 .hsw.is_tc_tbt = false,
3811 .name = "AUX E TC3",
3812 .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
3813 .ops = &icl_aux_power_well_ops,
3814 .id = DISP_PW_ID_NONE,
3816 .hsw.regs = &icl_aux_power_well_regs,
3817 .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
3818 .hsw.is_tc_tbt = false,
3822 .name = "AUX F TC4",
3823 .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
3824 .ops = &icl_aux_power_well_ops,
3825 .id = DISP_PW_ID_NONE,
3827 .hsw.regs = &icl_aux_power_well_regs,
3828 .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
3829 .hsw.is_tc_tbt = false,
3833 .name = "AUX C TBT1",
3834 .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
3835 .ops = &icl_aux_power_well_ops,
3836 .id = DISP_PW_ID_NONE,
3838 .hsw.regs = &icl_aux_power_well_regs,
3839 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
3840 .hsw.is_tc_tbt = true,
3844 .name = "AUX D TBT2",
3845 .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
3846 .ops = &icl_aux_power_well_ops,
3847 .id = DISP_PW_ID_NONE,
3849 .hsw.regs = &icl_aux_power_well_regs,
3850 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
3851 .hsw.is_tc_tbt = true,
3855 .name = "AUX E TBT3",
3856 .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
3857 .ops = &icl_aux_power_well_ops,
3858 .id = DISP_PW_ID_NONE,
3860 .hsw.regs = &icl_aux_power_well_regs,
3861 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
3862 .hsw.is_tc_tbt = true,
3866 .name = "AUX F TBT4",
3867 .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
3868 .ops = &icl_aux_power_well_ops,
3869 .id = DISP_PW_ID_NONE,
3871 .hsw.regs = &icl_aux_power_well_regs,
3872 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
3873 .hsw.is_tc_tbt = true,
3877 .name = "power well 4",
3878 .domains = ICL_PW_4_POWER_DOMAINS,
3879 .ops = &hsw_power_well_ops,
3880 .id = DISP_PW_ID_NONE,
3882 .hsw.regs = &hsw_power_well_regs,
3883 .hsw.idx = ICL_PW_CTL_IDX_PW_4,
3884 .hsw.has_fuses = true,
3885 .hsw.irq_pipe_mask = BIT(PIPE_C),
3891 tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
3897 u32 low_val = 0, high_val;
3900 high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_BLOCK_REQ;
3902 high_val = TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ;
3905 * Spec states that we should timeout the request after 200us
3906 * but the function below will timeout after 500us
3908 ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val,
3912 (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
3926 drm_err(&i915->drm, "TC cold %sblock failed\n",
3929 drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
3934 tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
3935 struct i915_power_well *power_well)
3937 tgl_tc_cold_request(i915, true);
3941 tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
3942 struct i915_power_well *power_well)
3944 tgl_tc_cold_request(i915, false);
3948 tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
3949 struct i915_power_well *power_well)
3951 if (power_well->count > 0)
3952 tgl_tc_cold_off_power_well_enable(i915, power_well);
3954 tgl_tc_cold_off_power_well_disable(i915, power_well);
3958 tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
3959 struct i915_power_well *power_well)
3962 * Not the correctly implementation but there is no way to just read it
3963 * from PCODE, so returning count to avoid state mismatch errors
3965 return power_well->count;
3968 static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
3969 .sync_hw = tgl_tc_cold_off_power_well_sync_hw,
3970 .enable = tgl_tc_cold_off_power_well_enable,
3971 .disable = tgl_tc_cold_off_power_well_disable,
3972 .is_enabled = tgl_tc_cold_off_power_well_is_enabled,
3975 static const struct i915_power_well_desc tgl_power_wells[] = {
3977 .name = "always-on",
3979 .domains = POWER_DOMAIN_MASK,
3980 .ops = &i9xx_always_on_power_well_ops,
3981 .id = DISP_PW_ID_NONE,
3984 .name = "power well 1",
3985 /* Handled by the DMC firmware */
3988 .ops = &hsw_power_well_ops,
3989 .id = SKL_DISP_PW_1,
3991 .hsw.regs = &hsw_power_well_regs,
3992 .hsw.idx = ICL_PW_CTL_IDX_PW_1,
3993 .hsw.has_fuses = true,
3998 .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
3999 .ops = &gen9_dc_off_power_well_ops,
4000 .id = SKL_DISP_DC_OFF,
4003 .name = "power well 2",
4004 .domains = TGL_PW_2_POWER_DOMAINS,
4005 .ops = &hsw_power_well_ops,
4006 .id = SKL_DISP_PW_2,
4008 .hsw.regs = &hsw_power_well_regs,
4009 .hsw.idx = ICL_PW_CTL_IDX_PW_2,
4010 .hsw.has_fuses = true,
4014 .name = "power well 3",
4015 .domains = TGL_PW_3_POWER_DOMAINS,
4016 .ops = &hsw_power_well_ops,
4017 .id = ICL_DISP_PW_3,
4019 .hsw.regs = &hsw_power_well_regs,
4020 .hsw.idx = ICL_PW_CTL_IDX_PW_3,
4021 .hsw.irq_pipe_mask = BIT(PIPE_B),
4022 .hsw.has_vga = true,
4023 .hsw.has_fuses = true,
4028 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
4029 .ops = &hsw_power_well_ops,
4030 .id = DISP_PW_ID_NONE,
4032 .hsw.regs = &icl_ddi_power_well_regs,
4033 .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
4038 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
4039 .ops = &hsw_power_well_ops,
4040 .id = DISP_PW_ID_NONE,
4042 .hsw.regs = &icl_ddi_power_well_regs,
4043 .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
4048 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
4049 .ops = &hsw_power_well_ops,
4050 .id = DISP_PW_ID_NONE,
4052 .hsw.regs = &icl_ddi_power_well_regs,
4053 .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
4057 .name = "DDI D TC1 IO",
4058 .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
4059 .ops = &hsw_power_well_ops,
4060 .id = DISP_PW_ID_NONE,
4062 .hsw.regs = &icl_ddi_power_well_regs,
4063 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
4067 .name = "DDI E TC2 IO",
4068 .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
4069 .ops = &hsw_power_well_ops,
4070 .id = DISP_PW_ID_NONE,
4072 .hsw.regs = &icl_ddi_power_well_regs,
4073 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
4077 .name = "DDI F TC3 IO",
4078 .domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS,
4079 .ops = &hsw_power_well_ops,
4080 .id = DISP_PW_ID_NONE,
4082 .hsw.regs = &icl_ddi_power_well_regs,
4083 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
4087 .name = "DDI G TC4 IO",
4088 .domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS,
4089 .ops = &hsw_power_well_ops,
4090 .id = DISP_PW_ID_NONE,
4092 .hsw.regs = &icl_ddi_power_well_regs,
4093 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
4097 .name = "DDI H TC5 IO",
4098 .domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS,
4099 .ops = &hsw_power_well_ops,
4100 .id = DISP_PW_ID_NONE,
4102 .hsw.regs = &icl_ddi_power_well_regs,
4103 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
4107 .name = "DDI I TC6 IO",
4108 .domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS,
4109 .ops = &hsw_power_well_ops,
4110 .id = DISP_PW_ID_NONE,
4112 .hsw.regs = &icl_ddi_power_well_regs,
4113 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
4118 .domains = TGL_AUX_A_IO_POWER_DOMAINS,
4119 .ops = &icl_aux_power_well_ops,
4120 .id = DISP_PW_ID_NONE,
4122 .hsw.regs = &icl_aux_power_well_regs,
4123 .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
4128 .domains = TGL_AUX_B_IO_POWER_DOMAINS,
4129 .ops = &icl_aux_power_well_ops,
4130 .id = DISP_PW_ID_NONE,
4132 .hsw.regs = &icl_aux_power_well_regs,
4133 .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
4138 .domains = TGL_AUX_C_IO_POWER_DOMAINS,
4139 .ops = &icl_aux_power_well_ops,
4140 .id = DISP_PW_ID_NONE,
4142 .hsw.regs = &icl_aux_power_well_regs,
4143 .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
4147 .name = "AUX D TC1",
4148 .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
4149 .ops = &icl_aux_power_well_ops,
4150 .id = DISP_PW_ID_NONE,
4152 .hsw.regs = &icl_aux_power_well_regs,
4153 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
4154 .hsw.is_tc_tbt = false,
4158 .name = "AUX E TC2",
4159 .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
4160 .ops = &icl_aux_power_well_ops,
4161 .id = DISP_PW_ID_NONE,
4163 .hsw.regs = &icl_aux_power_well_regs,
4164 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
4165 .hsw.is_tc_tbt = false,
4169 .name = "AUX F TC3",
4170 .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
4171 .ops = &icl_aux_power_well_ops,
4172 .id = DISP_PW_ID_NONE,
4174 .hsw.regs = &icl_aux_power_well_regs,
4175 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
4176 .hsw.is_tc_tbt = false,
4180 .name = "AUX G TC4",
4181 .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
4182 .ops = &icl_aux_power_well_ops,
4183 .id = DISP_PW_ID_NONE,
4185 .hsw.regs = &icl_aux_power_well_regs,
4186 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
4187 .hsw.is_tc_tbt = false,
4191 .name = "AUX H TC5",
4192 .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
4193 .ops = &icl_aux_power_well_ops,
4194 .id = DISP_PW_ID_NONE,
4196 .hsw.regs = &icl_aux_power_well_regs,
4197 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
4198 .hsw.is_tc_tbt = false,
4202 .name = "AUX I TC6",
4203 .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
4204 .ops = &icl_aux_power_well_ops,
4205 .id = DISP_PW_ID_NONE,
4207 .hsw.regs = &icl_aux_power_well_regs,
4208 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
4209 .hsw.is_tc_tbt = false,
4213 .name = "AUX D TBT1",
4214 .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
4215 .ops = &icl_aux_power_well_ops,
4216 .id = DISP_PW_ID_NONE,
4218 .hsw.regs = &icl_aux_power_well_regs,
4219 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
4220 .hsw.is_tc_tbt = true,
4224 .name = "AUX E TBT2",
4225 .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
4226 .ops = &icl_aux_power_well_ops,
4227 .id = DISP_PW_ID_NONE,
4229 .hsw.regs = &icl_aux_power_well_regs,
4230 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
4231 .hsw.is_tc_tbt = true,
4235 .name = "AUX F TBT3",
4236 .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
4237 .ops = &icl_aux_power_well_ops,
4238 .id = DISP_PW_ID_NONE,
4240 .hsw.regs = &icl_aux_power_well_regs,
4241 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
4242 .hsw.is_tc_tbt = true,
4246 .name = "AUX G TBT4",
4247 .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
4248 .ops = &icl_aux_power_well_ops,
4249 .id = DISP_PW_ID_NONE,
4251 .hsw.regs = &icl_aux_power_well_regs,
4252 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
4253 .hsw.is_tc_tbt = true,
4257 .name = "AUX H TBT5",
4258 .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
4259 .ops = &icl_aux_power_well_ops,
4260 .id = DISP_PW_ID_NONE,
4262 .hsw.regs = &icl_aux_power_well_regs,
4263 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
4264 .hsw.is_tc_tbt = true,
4268 .name = "AUX I TBT6",
4269 .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
4270 .ops = &icl_aux_power_well_ops,
4271 .id = DISP_PW_ID_NONE,
4273 .hsw.regs = &icl_aux_power_well_regs,
4274 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
4275 .hsw.is_tc_tbt = true,
4279 .name = "power well 4",
4280 .domains = TGL_PW_4_POWER_DOMAINS,
4281 .ops = &hsw_power_well_ops,
4282 .id = DISP_PW_ID_NONE,
4284 .hsw.regs = &hsw_power_well_regs,
4285 .hsw.idx = ICL_PW_CTL_IDX_PW_4,
4286 .hsw.has_fuses = true,
4287 .hsw.irq_pipe_mask = BIT(PIPE_C),
4291 .name = "power well 5",
4292 .domains = TGL_PW_5_POWER_DOMAINS,
4293 .ops = &hsw_power_well_ops,
4294 .id = DISP_PW_ID_NONE,
4296 .hsw.regs = &hsw_power_well_regs,
4297 .hsw.idx = TGL_PW_CTL_IDX_PW_5,
4298 .hsw.has_fuses = true,
4299 .hsw.irq_pipe_mask = BIT(PIPE_D),
4303 .name = "TC cold off",
4304 .domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
4305 .ops = &tgl_tc_cold_off_ops,
4306 .id = DISP_PW_ID_NONE,
/*
 * Normalize the i915.disable_power_well module parameter to 0/1.
 * Negative means "auto", which currently resolves to enabled (1).
 */
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}
4320 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
4327 if (INTEL_GEN(dev_priv) >= 12) {
4330 * DC9 has a separate HW flow from the rest of the DC states,
4331 * not depending on the DMC firmware. It's needed by system
4332 * suspend/resume, so allow it unconditionally.
4334 mask = DC_STATE_EN_DC9;
4335 } else if (IS_GEN(dev_priv, 11)) {
4337 mask = DC_STATE_EN_DC9;
4338 } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
4341 } else if (IS_GEN9_LP(dev_priv)) {
4343 mask = DC_STATE_EN_DC9;
4349 if (!i915_modparams.disable_power_well)
4352 if (enable_dc >= 0 && enable_dc <= max_dc) {
4353 requested_dc = enable_dc;
4354 } else if (enable_dc == -1) {
4355 requested_dc = max_dc;
4356 } else if (enable_dc > max_dc && enable_dc <= 4) {
4357 drm_dbg_kms(&dev_priv->drm,
4358 "Adjusting requested max DC state (%d->%d)\n",
4360 requested_dc = max_dc;
4362 drm_err(&dev_priv->drm,
4363 "Unexpected value for enable_dc (%d)\n", enable_dc);
4364 requested_dc = max_dc;
4367 switch (requested_dc) {
4369 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
4372 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
4375 mask |= DC_STATE_EN_UPTO_DC6;
4378 mask |= DC_STATE_EN_UPTO_DC5;
4382 drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
4388 __set_power_wells(struct i915_power_domains *power_domains,
4389 const struct i915_power_well_desc *power_well_descs,
4390 int power_well_count)
4392 u64 power_well_ids = 0;
4395 power_domains->power_well_count = power_well_count;
4396 power_domains->power_wells =
4397 kcalloc(power_well_count,
4398 sizeof(*power_domains->power_wells),
4400 if (!power_domains->power_wells)
4403 for (i = 0; i < power_well_count; i++) {
4404 enum i915_power_well_id id = power_well_descs[i].id;
4406 power_domains->power_wells[i].desc = &power_well_descs[i];
4408 if (id == DISP_PW_ID_NONE)
4411 WARN_ON(id >= sizeof(power_well_ids) * 8);
4412 WARN_ON(power_well_ids & BIT_ULL(id));
4413 power_well_ids |= BIT_ULL(id);
/* Convenience wrapper: derive the element count from the descriptor array. */
#define set_power_wells(power_domains, __power_well_descs) \
	__set_power_wells(power_domains, __power_well_descs, \
			  ARRAY_SIZE(__power_well_descs))
4424 * intel_power_domains_init - initializes the power domain structures
4425 * @dev_priv: i915 device instance
4427 * Initializes the power domain structures for @dev_priv depending upon the
4428 * supported platform.
4430 int intel_power_domains_init(struct drm_i915_private *dev_priv)
4432 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4435 i915_modparams.disable_power_well =
4436 sanitize_disable_power_well_option(dev_priv,
4437 i915_modparams.disable_power_well);
4438 dev_priv->csr.allowed_dc_mask =
4439 get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
4441 dev_priv->csr.target_dc_state =
4442 sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
4444 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
4446 mutex_init(&power_domains->lock);
4448 INIT_DELAYED_WORK(&power_domains->async_put_work,
4449 intel_display_power_put_async_work);
4452 * The enabling order will be from lower to higher indexed wells,
4453 * the disabling order is reversed.
4455 if (IS_GEN(dev_priv, 12)) {
4456 err = set_power_wells(power_domains, tgl_power_wells);
4457 } else if (IS_GEN(dev_priv, 11)) {
4458 err = set_power_wells(power_domains, icl_power_wells);
4459 } else if (IS_CANNONLAKE(dev_priv)) {
4460 err = set_power_wells(power_domains, cnl_power_wells);
4463 * DDI and Aux IO are getting enabled for all ports
4464 * regardless the presence or use. So, in order to avoid
4465 * timeouts, lets remove them from the list
4466 * for the SKUs without port F.
4468 if (!IS_CNL_WITH_PORT_F(dev_priv))
4469 power_domains->power_well_count -= 2;
4470 } else if (IS_GEMINILAKE(dev_priv)) {
4471 err = set_power_wells(power_domains, glk_power_wells);
4472 } else if (IS_BROXTON(dev_priv)) {
4473 err = set_power_wells(power_domains, bxt_power_wells);
4474 } else if (IS_GEN9_BC(dev_priv)) {
4475 err = set_power_wells(power_domains, skl_power_wells);
4476 } else if (IS_CHERRYVIEW(dev_priv)) {
4477 err = set_power_wells(power_domains, chv_power_wells);
4478 } else if (IS_BROADWELL(dev_priv)) {
4479 err = set_power_wells(power_domains, bdw_power_wells);
4480 } else if (IS_HASWELL(dev_priv)) {
4481 err = set_power_wells(power_domains, hsw_power_wells);
4482 } else if (IS_VALLEYVIEW(dev_priv)) {
4483 err = set_power_wells(power_domains, vlv_power_wells);
4484 } else if (IS_I830(dev_priv)) {
4485 err = set_power_wells(power_domains, i830_power_wells);
4487 err = set_power_wells(power_domains, i9xx_always_on_power_well);
4494 * intel_power_domains_cleanup - clean up power domains resources
4495 * @dev_priv: i915 device instance
4497 * Release any resources acquired by intel_power_domains_init()
4499 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
4501 kfree(dev_priv->power_domains.power_wells);
4504 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
4506 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4507 struct i915_power_well *power_well;
4509 mutex_lock(&power_domains->lock);
4510 for_each_power_well(dev_priv, power_well) {
4511 power_well->desc->ops->sync_hw(dev_priv, power_well);
4512 power_well->hw_enabled =
4513 power_well->desc->ops->is_enabled(dev_priv, power_well);
4515 mutex_unlock(&power_domains->lock);
4519 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
4520 i915_reg_t reg, bool enable)
4524 val = intel_de_read(dev_priv, reg);
4525 val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
4526 intel_de_write(dev_priv, reg, val);
4527 intel_de_posting_read(dev_priv, reg);
4530 status = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
4531 if ((enable && !status) || (!enable && status)) {
4532 drm_err(&dev_priv->drm, "DBus power %s timeout!\n",
4533 enable ? "enable" : "disable");
4539 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
4541 icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1));
/* Power down all DBUF slices. */
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	icl_dbuf_slices_update(dev_priv, 0);
}
4549 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
4553 int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
4554 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4556 drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices,
4557 "Invalid number of dbuf slices requested\n");
4559 drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
4563 * Might be running this in parallel to gen9_dc_off_power_well_enable
4564 * being called from intel_dp_detect for instance,
4565 * which causes assertion triggered by race condition,
4566 * as gen9_assert_dbuf_enabled might preempt this when registers
4567 * were already updated, while dev_priv was not.
4569 mutex_lock(&power_domains->lock);
4571 for (i = 0; i < max_slices; i++) {
4572 intel_dbuf_slice_set(dev_priv,
4574 (req_slices & BIT(i)) != 0);
4577 dev_priv->enabled_dbuf_slices_mask = req_slices;
4579 mutex_unlock(&power_domains->lock);
4582 static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
4584 skl_ddb_get_hw_state(dev_priv);
4586 * Just power up at least 1 slice, we will
4587 * figure out later which slices we have and what we need.
4589 icl_dbuf_slices_update(dev_priv, dev_priv->enabled_dbuf_slices_mask |
/* Power down all DBUF slices. */
static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
{
	icl_dbuf_slices_update(dev_priv, 0);
}
/*
 * Program the MBUS arbiter ABOX credit pools for ICL+.
 * Gen12+ has additional ABOX1/ABOX2 instances that get the same values.
 */
4598 static void icl_mbus_init(struct drm_i915_private *dev_priv)
4602 mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
4603 MBUS_ABOX_BT_CREDIT_POOL2_MASK |
4604 MBUS_ABOX_B_CREDIT_MASK |
4605 MBUS_ABOX_BW_CREDIT_MASK;
4606 val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
4607 MBUS_ABOX_BT_CREDIT_POOL2(16) |
4608 MBUS_ABOX_B_CREDIT(1) |
4609 MBUS_ABOX_BW_CREDIT(1);
4611 intel_de_rmw(dev_priv, MBUS_ABOX_CTL, mask, val);
4612 if (INTEL_GEN(dev_priv) >= 12) {
4613 intel_de_rmw(dev_priv, MBUS_ABOX1_CTL, mask, val);
4614 intel_de_rmw(dev_priv, MBUS_ABOX2_CTL, mask, val);
/*
 * Sanity-check the LCPLL state left by the BIOS on HSW/BDW: CDCLK must be
 * sourced from LCPLL, which must be enabled and on the non-SSC reference.
 * Only reports errors; never tries to fix the configuration.
 */
4618 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
4620 u32 val = intel_de_read(dev_priv, LCPLL_CTL);
4623 * The LCPLL register should be turned on by the BIOS. For now
4624 * let's just check its state and print errors in case
4625 * something is wrong. Don't even try to turn it on.
4628 if (val & LCPLL_CD_SOURCE_FCLK)
4629 drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
4631 if (val & LCPLL_PLL_DISABLE)
4632 drm_err(&dev_priv->drm, "LCPLL is disabled\n");
4634 if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
4635 drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
/*
 * Warn if anything that depends on LCPLL is still active: CRTCs, the
 * display power well, SPLL/WRPLLs, panel power, backlight PWMs, the
 * utility pin, PCH GTC and interrupts. Called just before LCPLL disable.
 */
4638 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
4640 struct drm_device *dev = &dev_priv->drm;
4641 struct intel_crtc *crtc;
4643 for_each_intel_crtc(dev, crtc)
4644 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
4645 pipe_name(crtc->pipe));
4647 I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
4648 "Display power well on\n");
4649 I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
4651 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
4652 "WRPLL1 enabled\n")
4653 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
4654 "WRPLL2 enabled\n");
4655 I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
4656 "Panel power on\n");
4657 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
4658 "CPU PWM1 enabled\n");
/* PWM2 only exists on Haswell. */
4659 if (IS_HASWELL(dev_priv))
4660 I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
4661 "CPU PWM2 enabled\n");
4662 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
4663 "PCH PWM1 enabled\n");
4664 I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
4665 "Utility pin enabled\n");
4666 I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
4667 "PCH GTC enabled\n");
4670 * In theory we can still leave IRQs enabled, as long as only the HPD
4671 * interrupts remain enabled. We used to check for that, but since it's
4672 * gen-specific and since we only disable LCPLL after we fully disable
4673 * the interrupts, the check below should be enough.
4675 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
/* Read D_COMP from the platform-specific register (HSW vs BDW layout). */
4678 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
4680 if (IS_HASWELL(dev_priv))
4681 return intel_de_read(dev_priv, D_COMP_HSW);
4683 return intel_de_read(dev_priv, D_COMP_BDW);
/*
 * Write D_COMP. On Haswell the write must go through the pcode mailbox;
 * on Broadwell it is a plain MMIO write followed by a posting read.
 */
4686 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
4688 if (IS_HASWELL(dev_priv)) {
4689 if (sandybridge_pcode_write(dev_priv,
4690 GEN6_PCODE_WRITE_D_COMP, val))
4691 drm_dbg_kms(&dev_priv->drm,
4692 "Failed to write to D_COMP\n");
4694 intel_de_write(dev_priv, D_COMP_BDW, val);
4695 intel_de_posting_read(dev_priv, D_COMP_BDW);
4700 * This function implements pieces of two sequences from BSpec:
4701 * - Sequence for display software to disable LCPLL
4702 * - Sequence for display software to allow package C8+
4703 * The steps implemented here are just the steps that actually touch the LCPLL
4704 * register. Callers should take care of disabling all the display engine
4705 * functions, doing the mode unset, fixing interrupts, etc.
4707 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
4708 bool switch_to_fclk, bool allow_power_down)
/* Nothing depending on LCPLL may still be running at this point. */
4712 assert_can_disable_lcpll(dev_priv)
4714 val = intel_de_read(dev_priv, LCPLL_CTL);
/* Optionally move CDCLK onto FCLK before taking LCPLL down. */
4716 if (switch_to_fclk) {
4717 val |= LCPLL_CD_SOURCE_FCLK;
4718 intel_de_write(dev_priv, LCPLL_CTL, val);
4720 if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
4721 LCPLL_CD_SOURCE_FCLK_DONE, 1))
4722 drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
4724 val = intel_de_read(dev_priv, LCPLL_CTL);
/* Disable the PLL and wait for the lock indication to drop. */
4727 val |= LCPLL_PLL_DISABLE;
4728 intel_de_write(dev_priv, LCPLL_CTL, val);
4729 intel_de_posting_read(dev_priv, LCPLL_CTL);
4731 if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
4732 drm_err(&dev_priv->drm, "LCPLL still locked\n");
/* Disable D_COMP and wait for any in-flight RCOMP to finish. */
4734 val = hsw_read_dcomp(dev_priv);
4735 val |= D_COMP_COMP_DISABLE;
4736 hsw_write_dcomp(dev_priv, val);
4739 if (wait_for((hsw_read_dcomp(dev_priv) &
4740 D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
4741 drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
/* Optionally let the hardware fully power down LCPLL (for PC8+). */
4743 if (allow_power_down) {
4744 val = intel_de_read(dev_priv, LCPLL_CTL);
4745 val |= LCPLL_POWER_DOWN_ALLOW;
4746 intel_de_write(dev_priv, LCPLL_CTL, val);
4747 intel_de_posting_read(dev_priv, LCPLL_CTL);
4752 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
4755 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
4759 val = intel_de_read(dev_priv, LCPLL_CTL);
/* Fast path: LCPLL already locked, enabled and sourcing CDCLK. */
4761 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
4762 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
4766 * Make sure we're not on PC8 state before disabling PC8, otherwise
4767 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
4769 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* Revoke the power-down permission granted in hsw_disable_lcpll(). */
4771 if (val & LCPLL_POWER_DOWN_ALLOW) {
4772 val &= ~LCPLL_POWER_DOWN_ALLOW;
4773 intel_de_write(dev_priv, LCPLL_CTL, val);
4774 intel_de_posting_read(dev_priv, LCPLL_CTL);
/* Re-enable D_COMP with the comparator forced on. */
4777 val = hsw_read_dcomp(dev_priv);
4778 val |= D_COMP_COMP_FORCE;
4779 val &= ~D_COMP_COMP_DISABLE;
4780 hsw_write_dcomp(dev_priv, val);
/* Re-enable the PLL and wait for it to lock. */
4782 val = intel_de_read(dev_priv, LCPLL_CTL);
4783 val &= ~LCPLL_PLL_DISABLE;
4784 intel_de_write(dev_priv, LCPLL_CTL, val);
4786 if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
4787 drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
/* Switch CDCLK back from FCLK to LCPLL if it was moved. */
4789 if (val & LCPLL_CD_SOURCE_FCLK) {
4790 val = intel_de_read(dev_priv, LCPLL_CTL);
4791 val &= ~LCPLL_CD_SOURCE_FCLK;
4792 intel_de_write(dev_priv, LCPLL_CTL, val);
4794 if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
4795 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
4796 drm_err(&dev_priv->drm,
4797 "Switching back to LCPLL failed\n");
4800 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
/* Refresh the cached CDCLK state now that the clock source changed. */
4802 intel_update_cdclk(dev_priv);
4803 intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
4807 * Package states C8 and deeper are really deep PC states that can only be
4808 * reached when all the devices on the system allow it, so even if the graphics
4809 * device allows PC8+, it doesn't mean the system will actually get to these
4810 * states. Our driver only allows PC8+ when going into runtime PM.
4812 * The requirements for PC8+ are that all the outputs are disabled, the power
4813 * well is disabled and most interrupts are disabled, and these are also
4814 * requirements for runtime PM. When these conditions are met, we manually do
4815 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
4816 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
4819 * When we really reach PC8 or deeper states (not just when we allow it) we lose
4820 * the state of some registers, so when we come back from PC8+ we need to
4821 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
4822 * need to take care of the registers kept by RC6. Notice that this happens even
4823 * if we don't put the device in PCI D3 state (which is what currently happens
4824 * because of the runtime PM support).
4826 * For more, read "Display Sequences for Package C8" on the hardware
/*
 * Allow package C8+: enable PCH low-power partitioning on LPT-LP,
 * disable the DP clkout and shut down LCPLL (switching CDCLK to FCLK
 * and allowing full power-down).
 */
4829 static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
4833 drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
4835 if (HAS_PCH_LPT_LP(dev_priv)) {
4836 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
4837 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4838 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
4841 lpt_disable_clkout_dp(dev_priv);
4842 hsw_disable_lcpll(dev_priv, true, true);
/*
 * Undo hsw_enable_pc8(): restore LCPLL, reprogram the PCH reference
 * clock and re-disable PCH low-power partitioning on LPT-LP.
 */
4845 static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
4849 drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
4851 hsw_restore_lcpll(dev_priv);
4852 intel_init_pch_refclk(dev_priv);
4854 if (HAS_PCH_LPT_LP(dev_priv)) {
4855 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
4856 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
4857 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
/*
 * Enable or disable the north/south (PCH) reset handshake. IVB uses a
 * different register and bits than HSW+.
 */
4861 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
4865 u32 reset_bits, val;
4867 if (IS_IVYBRIDGE(dev_priv)) {
4869 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
4871 reg = HSW_NDE_RSTWRN_OPT;
4872 reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
4875 val = intel_de_read(dev_priv, reg);
4882 intel_de_write(dev_priv, reg, val);
/*
 * SKL display core init sequence: disable DC states, enable the PCH
 * reset handshake, power up PG1 and Misc I/O, then bring up CDCLK and
 * DBUF. On resume with DMC firmware loaded, reprogram the CSR payload.
 */
4885 static void skl_display_core_init(struct drm_i915_private *dev_priv,
4888 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4889 struct i915_power_well *well;
4891 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4893 /* enable PCH reset handshake */
4894 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4896 /* enable PG1 and Misc I/O */
4897 mutex_lock(&power_domains->lock);
4899 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4900 intel_power_well_enable(dev_priv, well);
4902 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
4903 intel_power_well_enable(dev_priv, well);
4905 mutex_unlock(&power_domains->lock);
4907 intel_cdclk_init_hw(dev_priv);
4909 gen9_dbuf_enable(dev_priv);
4911 if (resume && dev_priv->csr.dmc_payload)
4912 intel_csr_load_program(dev_priv);
/*
 * SKL display core uninit: reverse of skl_display_core_init() — disable
 * DC states, DBUF and CDCLK, then drop the driver's PW1 request (Misc
 * I/O is deliberately left enabled per BSpec).
 */
4915 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
4917 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4918 struct i915_power_well *well;
4920 gen9_disable_dc_states(dev_priv);
4922 gen9_dbuf_disable(dev_priv);
4924 intel_cdclk_uninit_hw(dev_priv);
4926 /* The spec doesn't call for removing the reset handshake flag */
4927 /* disable PG1 and Misc I/O */
4929 mutex_lock(&power_domains->lock);
4932 * BSpec says to keep the MISC IO power well enabled here, only
4933 * remove our request for power well 1.
4934 * Note that even though the driver's request is removed power well 1
4935 * may stay enabled after this due to DMC's own request on it.
4937 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4938 intel_power_well_disable(dev_priv, well);
4940 mutex_unlock(&power_domains->lock);
4942 usleep_range(10, 30); /* 10 us delay per Bspec */
/*
 * BXT display core init: like SKL but with the PCH reset handshake
 * forced off (there is no PCH) and no Misc I/O power well.
 */
4945 static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
4947 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4948 struct i915_power_well *well;
4950 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4953 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
4954 * or else the reset will hang because there is no PCH to respond.
4955 * Move the handshake programming to initialization sequence.
4956 * Previously was left up to BIOS.
4958 intel_pch_reset_handshake(dev_priv, false);
4961 mutex_lock(&power_domains->lock);
4963 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4964 intel_power_well_enable(dev_priv, well);
4966 mutex_unlock(&power_domains->lock);
4968 intel_cdclk_init_hw(dev_priv);
4970 gen9_dbuf_enable(dev_priv);
4972 if (resume && dev_priv->csr.dmc_payload)
4973 intel_csr_load_program(dev_priv);
/*
 * BXT display core uninit: disable DC states, DBUF and CDCLK, then drop
 * the driver's request on PW1 (DMC may keep it enabled on its own).
 */
4976 static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
4978 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4979 struct i915_power_well *well;
4981 gen9_disable_dc_states(dev_priv);
4983 gen9_dbuf_disable(dev_priv);
4985 intel_cdclk_uninit_hw(dev_priv);
4987 /* The spec doesn't call for removing the reset handshake flag */
4990 * Disable PW1 (PG1).
4991 * Note that even though the driver's request is removed power well 1
4992 * may stay enabled after this due to DMC's own request on it.
4994 mutex_lock(&power_domains->lock);
4996 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4997 intel_power_well_disable(dev_priv, well);
4999 mutex_unlock(&power_domains->lock);
5001 usleep_range(10, 30); /* 10 us delay per Bspec */
/*
 * CNL display core init, numbered per the BSpec sequence: reset
 * handshake, combo PHY init, PW1, CDCLK, DBUF, then optional CSR/DMC
 * firmware reload on resume.
 */
5004 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
5006 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5007 struct i915_power_well *well;
5009 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5011 /* 1. Enable PCH Reset Handshake */
5012 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5015 intel_combo_phy_init(dev_priv);
5018 * 4. Enable Power Well 1 (PG1).
5019 * The AUX IO power wells will be enabled on demand.
5021 mutex_lock(&power_domains->lock);
5022 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5023 intel_power_well_enable(dev_priv, well);
5024 mutex_unlock(&power_domains->lock);
5026 /* 5. Enable CD clock */
5027 intel_cdclk_init_hw(dev_priv);
5029 /* 6. Enable DBUF */
5030 gen9_dbuf_enable(dev_priv);
5032 if (resume && dev_priv->csr.dmc_payload)
5033 intel_csr_load_program(dev_priv);
/*
 * CNL display core uninit: reverse of cnl_display_core_init(), ending
 * with combo PHY uninit.
 */
5036 static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
5038 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5039 struct i915_power_well *well;
5041 gen9_disable_dc_states(dev_priv);
5043 /* 1. Disable all display engine functions -> already done */
5045 /* 2. Disable DBUF */
5046 gen9_dbuf_disable(dev_priv);
5048 /* 3. Disable CD clock */
5049 intel_cdclk_uninit_hw(dev_priv);
5052 * 4. Disable Power Well 1 (PG1).
5053 * The AUX IO power wells are toggled on demand, so they are already
5054 * disabled at this point.
5056 mutex_lock(&power_domains->lock);
5057 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5058 intel_power_well_disable(dev_priv, well);
5059 mutex_unlock(&power_domains->lock);
5061 usleep_range(10, 30); /* 10 us delay per Bspec */
5064 intel_combo_phy_uninit(dev_priv);
/* Lookup entry mapping a DRAM configuration to a BW_BUDDY page mask. */
5067 struct buddy_page_mask {
/* Production page masks, keyed by channel count and DRAM type. */
5073 static const struct buddy_page_mask tgl_buddy_page_masks[] = {
5074 { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0xE },
5075 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF },
5076 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
5077 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F },
/* Wa_1409767108 page masks used on early TGL steppings. */
5081 static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
5082 { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
5083 { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0x1 },
5084 { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
5085 { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x3 },
/*
 * Program the TGL BW_BUDDY arbiter page masks from the table matching
 * the detected DRAM configuration; disable buddy logic entirely if the
 * configuration is unknown. Applies Wa_1409767108 and Wa_22010178259.
 */
5089 static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
5091 enum intel_dram_type type = dev_priv->dram_info.type;
5092 u8 num_channels = dev_priv->dram_info.num_channels;
5093 const struct buddy_page_mask *table;
5096 if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
5097 /* Wa_1409767108: tgl */
5098 table = wa_1409767108_buddy_page_masks;
5100 table = tgl_buddy_page_masks;
/* Linear scan; tables are terminated by a zero page_mask entry. */
5102 for (i = 0; table[i].page_mask != 0; i++)
5103 if (table[i].num_channels == num_channels &&
5104 table[i].type == type)
5107 if (table[i].page_mask == 0) {
5108 drm_dbg(&dev_priv->drm,
5109 "Unknown memory configuration; disabling address buddy logic.\n");
5110 intel_de_write(dev_priv, BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
5111 intel_de_write(dev_priv, BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
5113 intel_de_write(dev_priv, BW_BUDDY1_PAGE_MASK,
5114 table[i].page_mask);
5115 intel_de_write(dev_priv, BW_BUDDY2_PAGE_MASK,
5116 table[i].page_mask);
5118 /* Wa_22010178259:tgl */
5119 intel_de_rmw(dev_priv, BW_BUDDY1_CTL,
5120 BW_BUDDY_TLB_REQ_TIMER_MASK,
5121 REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
5122 intel_de_rmw(dev_priv, BW_BUDDY2_CTL,
5123 BW_BUDDY_TLB_REQ_TIMER_MASK,
5124 REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
/*
 * ICL+ display core init, numbered per the BSpec sequence: reset
 * handshake, combo PHYs, PW1, CDCLK, DBUF, MBUS, then (gen12+)
 * BW_BUDDY, and on resume an optional CSR/DMC firmware reload.
 */
5128 static void icl_display_core_init(struct drm_i915_private *dev_priv,
5131 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5132 struct i915_power_well *well;
5134 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
5136 /* 1. Enable PCH reset handshake. */
5137 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
5139 /* 2. Initialize all combo phys */
5140 intel_combo_phy_init(dev_priv);
5143 * 3. Enable Power Well 1 (PG1).
5144 * The AUX IO power wells will be enabled on demand.
5146 mutex_lock(&power_domains->lock);
5147 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5148 intel_power_well_enable(dev_priv, well);
5149 mutex_unlock(&power_domains->lock);
5151 /* 4. Enable CDCLK. */
5152 intel_cdclk_init_hw(dev_priv);
5154 /* 5. Enable DBUF. */
5155 icl_dbuf_enable(dev_priv);
5157 /* 6. Setup MBUS. */
5158 icl_mbus_init(dev_priv);
5160 /* 7. Program arbiter BW_BUDDY registers */
5161 if (INTEL_GEN(dev_priv) >= 12)
5162 tgl_bw_buddy_init(dev_priv);
5164 if (resume && dev_priv->csr.dmc_payload)
5165 intel_csr_load_program(dev_priv);
/*
 * ICL+ display core uninit: reverse of icl_display_core_init(), ending
 * with combo PHY uninit.
 */
5168 static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
5170 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5171 struct i915_power_well *well;
5173 gen9_disable_dc_states(dev_priv);
5175 /* 1. Disable all display engine functions -> already done */
5177 /* 2. Disable DBUF */
5178 icl_dbuf_disable(dev_priv);
5180 /* 3. Disable CD clock */
5181 intel_cdclk_uninit_hw(dev_priv);
5184 * 4. Disable Power Well 1 (PG1).
5185 * The AUX IO power wells are toggled on demand, so they are already
5186 * disabled at this point.
5188 mutex_lock(&power_domains->lock);
5189 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
5190 intel_power_well_disable(dev_priv, well);
5191 mutex_unlock(&power_domains->lock);
5194 intel_combo_phy_uninit(dev_priv);
/*
 * Reconstruct the software shadow of DISPLAY_PHY_CONTROL for CHV from
 * the current power well and DPLL lane-status state. The register itself
 * must never be read (hardware corruption workaround), so the shadow in
 * dev_priv->chv_phy_control is the only source of truth; it is applied
 * to hardware later when the power wells are enabled.
 */
5197 static void chv_phy_control_init(struct drm_i915_private *dev_priv)
5199 struct i915_power_well *cmn_bc =
5200 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5201 struct i915_power_well *cmn_d =
5202 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
5205 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
5206 * workaround never ever read DISPLAY_PHY_CONTROL, and
5207 * instead maintain a shadow copy ourselves. Use the actual
5208 * power well state and lane status to reconstruct the
5209 * expected initial value.
5211 dev_priv->chv_phy_control =
5212 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
5213 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
5214 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
5215 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
5216 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);
5219 * If all lanes are disabled we leave the override disabled
5220 * with all power down bits cleared to match the state we
5221 * would use after disabling the port. Otherwise enable the
5222 * override and set the lane powerdown bits according to the
5223 * current lane status.
5225 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
5226 u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
/* PHY0 channel 0 (port B) lane status. */
5229 mask = status & DPLL_PORTB_READY_MASK;
5233 dev_priv->chv_phy_control |=
5234 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
5236 dev_priv->chv_phy_control |=
5237 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
/* PHY0 channel 1 (port C) lane status. */
5239 mask = (status & DPLL_PORTC_READY_MASK) >> 4;
5243 dev_priv->chv_phy_control |=
5244 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
5246 dev_priv->chv_phy_control |=
5247 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
5249 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
5251 dev_priv->chv_phy_assert[DPIO_PHY0] = false;
5253 dev_priv->chv_phy_assert[DPIO_PHY0] = true;
/* Same reconstruction for PHY1 (single channel, port D). */
5256 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
5257 u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
5260 mask = status & DPLL_PORTD_READY_MASK;
5265 dev_priv->chv_phy_control |=
5266 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
5268 dev_priv->chv_phy_control |=
5269 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
5271 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
5273 dev_priv->chv_phy_assert[DPIO_PHY1] = false;
5275 dev_priv->chv_phy_assert[DPIO_PHY1] = true;
5278 drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
5279 dev_priv->chv_phy_control);
5281 /* Defer application of initial phy_control to enabling the powerwell */
/*
 * VLV common-lane workaround: if the display isn't already fully up,
 * toggle the PHY side reset by power-gating and un-gating the common
 * lane well (plain ungating doesn't reset the PHY sufficiently).
 */
5284 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
5286 struct i915_power_well *cmn =
5287 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
5288 struct i915_power_well *disp2d =
5289 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);
5291 /* If the display might be already active skip this */
5292 if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
5293 disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
5294 intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
5297 drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
5299 /* cmnlane needs DPLL registers */
5300 disp2d->desc->ops->enable(dev_priv, disp2d);
5303 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
5304 * Need to assert and de-assert PHY SB reset by gating the
5305 * common lane power, then un-gating it.
5306 * Simply ungating isn't enough to reset the PHY enough to get
5307 * ports and lanes running.
5309 cmn->desc->ops->disable(dev_priv, cmn);
/*
 * Query the Punit subsystem status register @reg0 and report whether
 * that subsystem is currently power gated.
 */
5312 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
5316 vlv_punit_get(dev_priv);
5317 ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
5318 vlv_punit_put(dev_priv);
/* Warn if the VED (video decode engine) block isn't power gated. */
5323 static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
5325 drm_WARN(&dev_priv->drm,
5326 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
5327 "VED not power gated\n");
/*
 * Warn if the ISP isn't power gated, unless one of the known ISP PCI
 * devices is present (in which case another driver may own it).
 */
5330 static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
5332 static const struct pci_device_id isp_ids[] = {
5333 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
5334 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
5338 drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
5339 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
5340 "ISP not power gated\n");
5343 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
5346 * intel_power_domains_init_hw - initialize hardware power domain state
5347 * @i915: i915 device instance
5348 * @resume: Called from resume code paths or not
5350 * This function initializes the hardware power domain state and enables all
5351 * power wells belonging to the INIT power domain. Power wells in other
5352 * domains (and not in the INIT domain) are referenced or disabled by
5353 * intel_modeset_readout_hw_state(). After that the reference count of each
5354 * power well must match its HW enabled state, see
5355 * intel_power_domains_verify_state().
5357 * It will return with power domains disabled (to be enabled later by
5358 * intel_power_domains_enable()) and must be paired with
5359 * intel_power_domains_driver_remove().
5361 void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
5363 struct i915_power_domains *power_domains = &i915->power_domains;
5365 power_domains->initializing = true;
/* Dispatch to the platform-specific display core init sequence. */
5367 if (INTEL_GEN(i915) >= 11) {
5368 icl_display_core_init(i915, resume);
5369 } else if (IS_CANNONLAKE(i915)) {
5370 cnl_display_core_init(i915, resume);
5371 } else if (IS_GEN9_BC(i915)) {
5372 skl_display_core_init(i915, resume);
5373 } else if (IS_GEN9_LP(i915)) {
5374 bxt_display_core_init(i915, resume);
5375 } else if (IS_CHERRYVIEW(i915)) {
5376 mutex_lock(&power_domains->lock);
5377 chv_phy_control_init(i915);
5378 mutex_unlock(&power_domains->lock);
5379 assert_isp_power_gated(i915);
5380 } else if (IS_VALLEYVIEW(i915)) {
5381 mutex_lock(&power_domains->lock);
5382 vlv_cmnlane_wa(i915);
5383 mutex_unlock(&power_domains->lock);
5384 assert_ved_power_gated(i915);
5385 assert_isp_power_gated(i915);
5386 } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
5387 hsw_assert_cdclk(i915);
5388 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5389 } else if (IS_IVYBRIDGE(i915)) {
5390 intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
5394 * Keep all power wells enabled for any dependent HW access during
5395 * initialization and to make sure we keep BIOS enabled display HW
5396 * resources powered until display HW readout is complete. We drop
5397 * this reference in intel_power_domains_enable().
5399 power_domains->wakeref =
5400 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5402 /* Disable power support if the user asked so. */
5403 if (!i915_modparams.disable_power_well)
5404 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5405 intel_power_domains_sync_hw(i915);
5407 power_domains->initializing = false;
5411 * intel_power_domains_driver_remove - deinitialize hw power domain state
5412 * @i915: i915 device instance
5414 * De-initializes the display power domain HW state. It also ensures that the
5415 * device stays powered up so that the driver can be reloaded.
5417 * It must be called with power domains already disabled (after a call to
5418 * intel_power_domains_disable()) and must be paired with
5419 * intel_power_domains_init_hw().
5421 void intel_power_domains_driver_remove(struct drm_i915_private *i915)
5423 intel_wakeref_t wakeref __maybe_unused =
5424 fetch_and_zero(&i915->power_domains.wakeref);
5426 /* Remove the refcount we took to keep power well support disabled. */
5427 if (!i915_modparams.disable_power_well)
5428 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
/* Flush any pending async power-domain work before verifying state. */
5430 intel_display_power_flush_work_sync(i915);
5432 intel_power_domains_verify_state(i915);
5434 /* Keep the power well enabled, but cancel its rpm wakeref. */
5435 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
5439 * intel_power_domains_enable - enable toggling of display power wells
5440 * @i915: i915 device instance
5442 * Enable the ondemand enabling/disabling of the display power wells. Note that
5443 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
5444 * only at specific points of the display modeset sequence, thus they are not
5445 * affected by the intel_power_domains_enable()/disable() calls. The purpose
5446 * of these function is to keep the rest of power wells enabled until the end
5447 * of display HW readout (which will acquire the power references reflecting
5448 * the current HW state).
5450 void intel_power_domains_enable(struct drm_i915_private *i915)
/* Drop the INIT reference taken in intel_power_domains_init_hw()/disable(). */
5452 intel_wakeref_t wakeref __maybe_unused =
5453 fetch_and_zero(&i915->power_domains.wakeref);
5455 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5456 intel_power_domains_verify_state(i915);
5460 * intel_power_domains_disable - disable toggling of display power wells
5461 * @i915: i915 device instance
5463 * Disable the ondemand enabling/disabling of the display power wells. See
5464 * intel_power_domains_enable() for which power wells this call controls.
5466 void intel_power_domains_disable(struct drm_i915_private *i915)
5468 struct i915_power_domains *power_domains = &i915->power_domains;
/* Must not already hold an INIT wakeref; grab one to pin all wells. */
5470 drm_WARN_ON(&i915->drm, power_domains->wakeref);
5471 power_domains->wakeref =
5472 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5474 intel_power_domains_verify_state(i915);
5478 * intel_power_domains_suspend - suspend power domain state
5479 * @i915: i915 device instance
5480 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
5482 * This function prepares the hardware power domain state before entering
5485 * It must be called with power domains already disabled (after a call to
5486 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
5488 void intel_power_domains_suspend(struct drm_i915_private *i915,
5489 enum i915_drm_suspend_mode suspend_mode)
5491 struct i915_power_domains *power_domains = &i915->power_domains;
5492 intel_wakeref_t wakeref __maybe_unused =
5493 fetch_and_zero(&power_domains->wakeref);
5495 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
5498 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
5499 * support don't manually deinit the power domains. This also means the
5500 * CSR/DMC firmware will stay active, it will power down any HW
5501 * resources as required and also enable deeper system power states
5502 * that would be blocked if the firmware was inactive.
5504 if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
5505 suspend_mode == I915_DRM_SUSPEND_IDLE &&
5506 i915->csr.dmc_payload) {
5507 intel_display_power_flush_work(i915);
5508 intel_power_domains_verify_state(i915);
5513 * Even if power well support was disabled we still want to disable
5514 * power wells if power domains must be deinitialized for suspend.
5516 if (!i915_modparams.disable_power_well)
5517 intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
5519 intel_display_power_flush_work(i915);
5520 intel_power_domains_verify_state(i915);
/* Platform-specific display core teardown (mirrors init_hw dispatch). */
5522 if (INTEL_GEN(i915) >= 11)
5523 icl_display_core_uninit(i915);
5524 else if (IS_CANNONLAKE(i915))
5525 cnl_display_core_uninit(i915);
5526 else if (IS_GEN9_BC(i915))
5527 skl_display_core_uninit(i915);
5528 else if (IS_GEN9_LP(i915))
5529 bxt_display_core_uninit(i915);
/* Remembered so intel_power_domains_resume() knows to re-init. */
5531 power_domains->display_core_suspended = true;
5535 * intel_power_domains_resume - resume power domain state
5536 * @i915: i915 device instance
5538 * This function resumes the hardware power domain state during system resume.
5540 * It will return with power domain support disabled (to be enabled later by
5541 * intel_power_domains_enable()) and must be paired with
5542 * intel_power_domains_suspend().
5544 void intel_power_domains_resume(struct drm_i915_private *i915)
5546 struct i915_power_domains *power_domains = &i915->power_domains;
/* Re-run full HW init if suspend tore the display core down. */
5548 if (power_domains->display_core_suspended) {
5549 intel_power_domains_init_hw(i915, true);
5550 power_domains->display_core_suspended = false;
/* Otherwise just re-acquire the INIT wakeref dropped on suspend. */
5552 drm_WARN_ON(&i915->drm, power_domains->wakeref);
5553 power_domains->wakeref =
5554 intel_display_power_get(i915, POWER_DOMAIN_INIT);
5557 intel_power_domains_verify_state(i915);
5560 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/*
 * Debug helper: dump every power well's refcount plus the use count of
 * each domain it serves. Only built with DEBUG_RUNTIME_PM.
 */
5562 static void intel_power_domains_dump_info(struct drm_i915_private *i915)
5564 struct i915_power_domains *power_domains = &i915->power_domains;
5565 struct i915_power_well *power_well;
5567 for_each_power_well(i915, power_well) {
5568 enum intel_display_power_domain domain;
5570 drm_dbg(&i915->drm, "%-25s %d\n",
5571 power_well->desc->name, power_well->count);
5573 for_each_power_domain(domain, power_well->desc->domains)
5574 drm_dbg(&i915->drm, " %-23s %d\n",
5575 intel_display_power_domain_str(domain),
5576 power_domains->domain_use_count[domain]);
5581 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
5582 * @i915: i915 device instance
5584 * Verify if the reference count of each power well matches its HW enabled
5585 * state and the total refcount of the domains it belongs to. This must be
5586 * called after modeset HW state sanitization, which is responsible for
5587 * acquiring reference counts for any power wells in use and disabling the
5588 * ones left on by BIOS but not required by any active output.
5590 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
5592 struct i915_power_domains *power_domains = &i915->power_domains;
5593 struct i915_power_well *power_well;
5594 bool dump_domain_info;
5596 mutex_lock(&power_domains->lock);
5598 verify_async_put_domains_state(power_domains);
5600 dump_domain_info = false;
5601 for_each_power_well(i915, power_well) {
5602 enum intel_display_power_domain domain;
/* SW refcount (or always-on flag) must agree with the HW enabled bit. */
5606 enabled = power_well->desc->ops->is_enabled(i915, power_well);
5607 if ((power_well->count || power_well->desc->always_on) !=
5610 "power well %s state mismatch (refcount %d/enabled %d)",
5611 power_well->desc->name,
5612 power_well->count, enabled);
/* The well's refcount must equal the sum of its domains' use counts. */
5615 for_each_power_domain(domain, power_well->desc->domains)
5616 domains_count += power_domains->domain_use_count[domain];
5618 if (power_well->count != domains_count) {
5620 "power well %s refcount/domain refcount mismatch "
5621 "(refcount %d/domains refcount %d)\n",
5622 power_well->desc->name, power_well->count,
5624 dump_domain_info = true;
5628 if (dump_domain_info) {
5632 intel_power_domains_dump_info(i915);
5637 mutex_unlock(&power_domains->lock);
5642 static void intel_power_domains_verify_state(struct drm_i915_private *i915)
/* Late suspend: enter DC9 (gen11+/GEN9-LP) or package C8 (HSW/BDW). */
5648 void intel_display_power_suspend_late(struct drm_i915_private *i915)
5650 if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915))
5651 bxt_enable_dc9(i915);
5652 else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
5653 hsw_enable_pc8(i915);
/* Early resume: undo intel_display_power_suspend_late() per platform. */
5656 void intel_display_power_resume_early(struct drm_i915_private *i915)
5658 if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
5659 gen9_sanitize_dc_state(i915);
5660 bxt_disable_dc9(i915);
5661 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5662 hsw_disable_pc8(i915);
/*
 * Suspend display power: tear down the display core and allow DC9 on
 * gen11+/GEN9-LP, or enable package C8 on HSW/BDW.
 */
5666 void intel_display_power_suspend(struct drm_i915_private *i915)
5668 if (INTEL_GEN(i915) >= 11) {
5669 icl_display_core_uninit(i915);
5670 bxt_enable_dc9(i915);
5671 } else if (IS_GEN9_LP(i915)) {
5672 bxt_display_core_uninit(i915);
5673 bxt_enable_dc9(i915);
5674 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5675 hsw_enable_pc8(i915);
5679 void intel_display_power_resume(struct drm_i915_private *i915)
5681 if (INTEL_GEN(i915) >= 11) {
5682 bxt_disable_dc9(i915);
5683 icl_display_core_init(i915, true);
5684 if (i915->csr.dmc_payload) {
5685 if (i915->csr.allowed_dc_mask &
5686 DC_STATE_EN_UPTO_DC6)
5687 skl_enable_dc6(i915);
5688 else if (i915->csr.allowed_dc_mask &
5689 DC_STATE_EN_UPTO_DC5)
5690 gen9_enable_dc5(i915);
5692 } else if (IS_GEN9_LP(i915)) {
5693 bxt_disable_dc9(i915);
5694 bxt_display_core_init(i915, true);
5695 if (i915->csr.dmc_payload &&
5696 (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
5697 gen9_enable_dc5(i915);
5698 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5699 hsw_disable_pc8(i915);