1 /* SPDX-License-Identifier: MIT */
3 * Copyright © 2019 Intel Corporation
6 #include <linux/vgaarb.h>
8 #include "display/intel_crt.h"
9 #include "display/intel_dp.h"
13 #include "intel_cdclk.h"
14 #include "intel_combo_phy.h"
15 #include "intel_csr.h"
16 #include "intel_display_power.h"
17 #include "intel_display_types.h"
18 #include "intel_dpio_phy.h"
19 #include "intel_hotplug.h"
20 #include "intel_sideband.h"
23 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
24 enum i915_power_well_id power_well_id);
27 intel_display_power_domain_str(enum intel_display_power_domain domain)
30 case POWER_DOMAIN_DISPLAY_CORE:
31 return "DISPLAY_CORE";
32 case POWER_DOMAIN_PIPE_A:
34 case POWER_DOMAIN_PIPE_B:
36 case POWER_DOMAIN_PIPE_C:
38 case POWER_DOMAIN_PIPE_D:
40 case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
41 return "PIPE_A_PANEL_FITTER";
42 case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
43 return "PIPE_B_PANEL_FITTER";
44 case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
45 return "PIPE_C_PANEL_FITTER";
46 case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
47 return "PIPE_D_PANEL_FITTER";
48 case POWER_DOMAIN_TRANSCODER_A:
49 return "TRANSCODER_A";
50 case POWER_DOMAIN_TRANSCODER_B:
51 return "TRANSCODER_B";
52 case POWER_DOMAIN_TRANSCODER_C:
53 return "TRANSCODER_C";
54 case POWER_DOMAIN_TRANSCODER_D:
55 return "TRANSCODER_D";
56 case POWER_DOMAIN_TRANSCODER_EDP:
57 return "TRANSCODER_EDP";
58 case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
59 return "TRANSCODER_VDSC_PW2";
60 case POWER_DOMAIN_TRANSCODER_DSI_A:
61 return "TRANSCODER_DSI_A";
62 case POWER_DOMAIN_TRANSCODER_DSI_C:
63 return "TRANSCODER_DSI_C";
64 case POWER_DOMAIN_PORT_DDI_A_LANES:
65 return "PORT_DDI_A_LANES";
66 case POWER_DOMAIN_PORT_DDI_B_LANES:
67 return "PORT_DDI_B_LANES";
68 case POWER_DOMAIN_PORT_DDI_C_LANES:
69 return "PORT_DDI_C_LANES";
70 case POWER_DOMAIN_PORT_DDI_D_LANES:
71 return "PORT_DDI_D_LANES";
72 case POWER_DOMAIN_PORT_DDI_E_LANES:
73 return "PORT_DDI_E_LANES";
74 case POWER_DOMAIN_PORT_DDI_F_LANES:
75 return "PORT_DDI_F_LANES";
76 case POWER_DOMAIN_PORT_DDI_G_LANES:
77 return "PORT_DDI_G_LANES";
78 case POWER_DOMAIN_PORT_DDI_H_LANES:
79 return "PORT_DDI_H_LANES";
80 case POWER_DOMAIN_PORT_DDI_I_LANES:
81 return "PORT_DDI_I_LANES";
82 case POWER_DOMAIN_PORT_DDI_A_IO:
83 return "PORT_DDI_A_IO";
84 case POWER_DOMAIN_PORT_DDI_B_IO:
85 return "PORT_DDI_B_IO";
86 case POWER_DOMAIN_PORT_DDI_C_IO:
87 return "PORT_DDI_C_IO";
88 case POWER_DOMAIN_PORT_DDI_D_IO:
89 return "PORT_DDI_D_IO";
90 case POWER_DOMAIN_PORT_DDI_E_IO:
91 return "PORT_DDI_E_IO";
92 case POWER_DOMAIN_PORT_DDI_F_IO:
93 return "PORT_DDI_F_IO";
94 case POWER_DOMAIN_PORT_DDI_G_IO:
95 return "PORT_DDI_G_IO";
96 case POWER_DOMAIN_PORT_DDI_H_IO:
97 return "PORT_DDI_H_IO";
98 case POWER_DOMAIN_PORT_DDI_I_IO:
99 return "PORT_DDI_I_IO";
100 case POWER_DOMAIN_PORT_DSI:
102 case POWER_DOMAIN_PORT_CRT:
104 case POWER_DOMAIN_PORT_OTHER:
106 case POWER_DOMAIN_VGA:
108 case POWER_DOMAIN_AUDIO:
110 case POWER_DOMAIN_AUX_A:
112 case POWER_DOMAIN_AUX_B:
114 case POWER_DOMAIN_AUX_C:
116 case POWER_DOMAIN_AUX_D:
118 case POWER_DOMAIN_AUX_E:
120 case POWER_DOMAIN_AUX_F:
122 case POWER_DOMAIN_AUX_G:
124 case POWER_DOMAIN_AUX_H:
126 case POWER_DOMAIN_AUX_I:
128 case POWER_DOMAIN_AUX_IO_A:
130 case POWER_DOMAIN_AUX_C_TBT:
132 case POWER_DOMAIN_AUX_D_TBT:
134 case POWER_DOMAIN_AUX_E_TBT:
136 case POWER_DOMAIN_AUX_F_TBT:
138 case POWER_DOMAIN_AUX_G_TBT:
140 case POWER_DOMAIN_AUX_H_TBT:
142 case POWER_DOMAIN_AUX_I_TBT:
144 case POWER_DOMAIN_GMBUS:
146 case POWER_DOMAIN_INIT:
148 case POWER_DOMAIN_MODESET:
150 case POWER_DOMAIN_GT_IRQ:
152 case POWER_DOMAIN_DPLL_DC_OFF:
153 return "DPLL_DC_OFF";
155 MISSING_CASE(domain);
160 static void intel_power_well_enable(struct drm_i915_private *dev_priv,
161 struct i915_power_well *power_well)
163 DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
164 power_well->desc->ops->enable(dev_priv, power_well);
165 power_well->hw_enabled = true;
168 static void intel_power_well_disable(struct drm_i915_private *dev_priv,
169 struct i915_power_well *power_well)
171 DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
172 power_well->hw_enabled = false;
173 power_well->desc->ops->disable(dev_priv, power_well);
176 static void intel_power_well_get(struct drm_i915_private *dev_priv,
177 struct i915_power_well *power_well)
179 if (!power_well->count++)
180 intel_power_well_enable(dev_priv, power_well);
183 static void intel_power_well_put(struct drm_i915_private *dev_priv,
184 struct i915_power_well *power_well)
186 WARN(!power_well->count, "Use count on power well %s is already zero",
187 power_well->desc->name);
189 if (!--power_well->count)
190 intel_power_well_disable(dev_priv, power_well);
194 * __intel_display_power_is_enabled - unlocked check for a power domain
195 * @dev_priv: i915 device instance
196 * @domain: power domain to check
198 * This is the unlocked version of intel_display_power_is_enabled() and should
199 * only be used from error capture and recovery code where deadlocks are
203 * True when the power domain is enabled, false otherwise.
205 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
206 enum intel_display_power_domain domain)
208 struct i915_power_well *power_well;
211 if (dev_priv->runtime_pm.suspended)
216 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
217 if (power_well->desc->always_on)
220 if (!power_well->hw_enabled) {
230 * intel_display_power_is_enabled - check for a power domain
231 * @dev_priv: i915 device instance
232 * @domain: power domain to check
234 * This function can be used to check the hw power domain state. It is mostly
235 * used in hardware state readout functions. Everywhere else code should rely
236 * upon explicit power domain reference counting to ensure that the hardware
237 * block is powered up before accessing it.
239 * Callers must hold the relevant modesetting locks to ensure that concurrent
240 * threads can't disable the power well while the caller tries to read a few
244 * True when the power domain is enabled, false otherwise.
246 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
247 enum intel_display_power_domain domain)
249 struct i915_power_domains *power_domains;
252 power_domains = &dev_priv->power_domains;
254 mutex_lock(&power_domains->lock);
255 ret = __intel_display_power_is_enabled(dev_priv, domain);
256 mutex_unlock(&power_domains->lock);
262 * Starting with Haswell, we have a "Power Down Well" that can be turned off
263 * when not needed anymore. We have 4 registers that can request the power well
264 * to be enabled, and it will only be disabled if none of the registers is
265 * requesting it to be enabled.
267 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
268 u8 irq_pipe_mask, bool has_vga)
270 struct pci_dev *pdev = dev_priv->drm.pdev;
273 * After we re-enable the power well, if we touch VGA register 0x3d5
274 * we'll get unclaimed register interrupts. This stops after we write
275 * anything to the VGA MSR register. The vgacon module uses this
276 * register all the time, so if we unbind our driver and, as a
277 * consequence, bind vgacon, we'll get stuck in an infinite loop at
278 * console_unlock(). So make here we touch the VGA MSR register, making
279 * sure vgacon can keep working normally without triggering interrupts
280 * and error messages.
283 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
284 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
285 vga_put(pdev, VGA_RSRC_LEGACY_IO);
289 gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
292 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
296 gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
299 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
300 struct i915_power_well *power_well)
302 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
303 int pw_idx = power_well->desc->hsw.idx;
305 /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
306 if (intel_de_wait_for_set(dev_priv, regs->driver,
307 HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
308 DRM_DEBUG_KMS("%s power well enable timeout\n",
309 power_well->desc->name);
311 /* An AUX timeout is expected if the TBT DP tunnel is down. */
312 WARN_ON(!power_well->desc->hsw.is_tc_tbt);
316 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
317 const struct i915_power_well_regs *regs,
320 u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
323 ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
324 ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
326 ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
327 ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
332 static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
333 struct i915_power_well *power_well)
335 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
336 int pw_idx = power_well->desc->hsw.idx;
341 * Bspec doesn't require waiting for PWs to get disabled, but still do
342 * this for paranoia. The known cases where a PW will be forced on:
343 * - a KVMR request on any power well via the KVMR request register
344 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
345 * DEBUG request registers
346 * Skip the wait in case any of the request bits are set and print a
347 * diagnostic message.
349 wait_for((disabled = !(I915_READ(regs->driver) &
350 HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
351 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
355 DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
356 power_well->desc->name,
357 !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
360 static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
361 enum skl_power_gate pg)
363 /* Timeout 5us for PG#0, for other PGs 1us */
364 WARN_ON(intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
365 SKL_FUSE_PG_DIST_STATUS(pg), 1));
368 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
369 struct i915_power_well *power_well)
371 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
372 int pw_idx = power_well->desc->hsw.idx;
373 bool wait_fuses = power_well->desc->hsw.has_fuses;
374 enum skl_power_gate uninitialized_var(pg);
378 pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
379 SKL_PW_CTL_IDX_TO_PG(pw_idx);
381 * For PW1 we have to wait both for the PW0/PG0 fuse state
382 * before enabling the power well and PW1/PG1's own fuse
383 * state after the enabling. For all other power wells with
384 * fuses we only have to wait for that PW/PG's fuse state
385 * after the enabling.
388 gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
391 val = I915_READ(regs->driver);
392 I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
393 hsw_wait_for_power_well_enable(dev_priv, power_well);
395 /* Display WA #1178: cnl */
396 if (IS_CANNONLAKE(dev_priv) &&
397 pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
398 pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
399 val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
400 val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
401 I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
405 gen9_wait_for_power_well_fuses(dev_priv, pg);
407 hsw_power_well_post_enable(dev_priv,
408 power_well->desc->hsw.irq_pipe_mask,
409 power_well->desc->hsw.has_vga);
412 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
413 struct i915_power_well *power_well)
415 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
416 int pw_idx = power_well->desc->hsw.idx;
419 hsw_power_well_pre_disable(dev_priv,
420 power_well->desc->hsw.irq_pipe_mask);
422 val = I915_READ(regs->driver);
423 I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
424 hsw_wait_for_power_well_disable(dev_priv, power_well);
427 #define ICL_AUX_PW_TO_PHY(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
430 icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
431 struct i915_power_well *power_well)
433 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
434 int pw_idx = power_well->desc->hsw.idx;
435 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
439 val = I915_READ(regs->driver);
440 I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
442 if (INTEL_GEN(dev_priv) < 12) {
443 val = I915_READ(ICL_PORT_CL_DW12(phy));
444 I915_WRITE(ICL_PORT_CL_DW12(phy), val | ICL_LANE_ENABLE_AUX);
447 hsw_wait_for_power_well_enable(dev_priv, power_well);
449 /* Display WA #1178: icl, tgl */
450 if (IS_TIGERLAKE(dev_priv))
451 wa_idx_max = ICL_PW_CTL_IDX_AUX_C;
453 wa_idx_max = ICL_PW_CTL_IDX_AUX_B;
455 if (!IS_ELKHARTLAKE(dev_priv) &&
456 pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= wa_idx_max &&
457 !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
458 val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
459 val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
460 I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
465 icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
466 struct i915_power_well *power_well)
468 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
469 int pw_idx = power_well->desc->hsw.idx;
470 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
473 if (INTEL_GEN(dev_priv) < 12) {
474 val = I915_READ(ICL_PORT_CL_DW12(phy));
475 I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX);
478 val = I915_READ(regs->driver);
479 I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
481 hsw_wait_for_power_well_disable(dev_priv, power_well);
484 #define ICL_AUX_PW_TO_CH(pw_idx) \
485 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
487 #define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
488 ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
490 static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
491 struct i915_power_well *power_well)
493 int pw_idx = power_well->desc->hsw.idx;
495 return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
496 ICL_AUX_PW_TO_CH(pw_idx);
499 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
501 static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
503 static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
504 struct i915_power_well *power_well)
506 int refs = hweight64(power_well->desc->domains &
507 async_put_domains_mask(&dev_priv->power_domains));
509 WARN_ON(refs > power_well->count);
514 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
515 struct i915_power_well *power_well)
517 enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
518 struct intel_digital_port *dig_port = NULL;
519 struct intel_encoder *encoder;
521 /* Bypass the check if all references are released asynchronously */
522 if (power_well_async_ref_count(dev_priv, power_well) ==
526 aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
528 for_each_intel_encoder(&dev_priv->drm, encoder) {
529 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
531 if (!intel_phy_is_tc(dev_priv, phy))
534 /* We'll check the MST primary port */
535 if (encoder->type == INTEL_OUTPUT_DP_MST)
538 dig_port = enc_to_dig_port(&encoder->base);
539 if (WARN_ON(!dig_port))
542 if (dig_port->aux_ch != aux_ch) {
550 if (WARN_ON(!dig_port))
553 WARN_ON(!intel_tc_port_ref_held(dig_port));
558 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
559 struct i915_power_well *power_well)
566 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
567 struct i915_power_well *power_well)
569 enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
572 icl_tc_port_assert_ref_held(dev_priv, power_well);
574 val = I915_READ(DP_AUX_CH_CTL(aux_ch));
575 val &= ~DP_AUX_CH_CTL_TBT_IO;
576 if (power_well->desc->hsw.is_tc_tbt)
577 val |= DP_AUX_CH_CTL_TBT_IO;
578 I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
580 hsw_power_well_enable(dev_priv, power_well);
static void
icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	icl_tc_port_assert_ref_held(dev_priv, power_well);

	hsw_power_well_disable(dev_priv, power_well);
}
593 * We should only use the power well if we explicitly asked the hardware to
594 * enable it, so check if it's enabled and also check if we've requested it to
597 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
598 struct i915_power_well *power_well)
600 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
601 enum i915_power_well_id id = power_well->desc->id;
602 int pw_idx = power_well->desc->hsw.idx;
603 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
604 HSW_PWR_WELL_CTL_STATE(pw_idx);
607 val = I915_READ(regs->driver);
610 * On GEN9 big core due to a DMC bug the driver's request bits for PW1
611 * and the MISC_IO PW will be not restored, so check instead for the
612 * BIOS's own request bits, which are forced-on for these power wells
613 * when exiting DC5/6.
615 if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
616 (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
617 val |= I915_READ(regs->bios);
619 return (val & mask) == mask;
622 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
624 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
625 "DC9 already programmed to be enabled.\n");
626 WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
627 "DC5 still not disabled to enable DC9.\n");
628 WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
629 HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
630 "Power well 2 on.\n");
631 WARN_ONCE(intel_irqs_enabled(dev_priv),
632 "Interrupts not disabled yet.\n");
635 * TODO: check for the following to verify the conditions to enter DC9
636 * state are satisfied:
637 * 1] Check relevant display engine registers to verify if mode set
638 * disable sequence was followed.
639 * 2] Check if display uninitialize sequence is initialized.
643 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
645 WARN_ONCE(intel_irqs_enabled(dev_priv),
646 "Interrupts not disabled yet.\n");
647 WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
648 "DC5 still not disabled.\n");
651 * TODO: check for the following to verify DC9 state was indeed
652 * entered before programming to disable it:
653 * 1] Check relevant display engine registers to verify if mode
654 * set disable sequence was followed.
655 * 2] Check if display uninitialize sequence is initialized.
659 static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
666 I915_WRITE(DC_STATE_EN, state);
668 /* It has been observed that disabling the dc6 state sometimes
669 * doesn't stick and dmc keeps returning old value. Make sure
670 * the write really sticks enough times and also force rewrite until
671 * we are confident that state is exactly what we want.
674 v = I915_READ(DC_STATE_EN);
677 I915_WRITE(DC_STATE_EN, state);
680 } else if (rereads++ > 5) {
684 } while (rewrites < 100);
687 DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
690 /* Most of the times we need one retry, avoid spam */
692 DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
696 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
700 mask = DC_STATE_EN_UPTO_DC5;
701 if (INTEL_GEN(dev_priv) >= 11)
702 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
703 else if (IS_GEN9_LP(dev_priv))
704 mask |= DC_STATE_EN_DC9;
706 mask |= DC_STATE_EN_UPTO_DC6;
711 static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
715 val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
717 DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
718 dev_priv->csr.dc_state, val);
719 dev_priv->csr.dc_state = val;
723 * gen9_set_dc_state - set target display C power state
724 * @dev_priv: i915 device instance
725 * @state: target DC power state
727 * - DC_STATE_EN_UPTO_DC5
728 * - DC_STATE_EN_UPTO_DC6
731 * Signal to DMC firmware/HW the target DC power state passed in @state.
732 * DMC/HW can turn off individual display clocks and power rails when entering
733 * a deeper DC power state (higher in number) and turns these back when exiting
734 * that state to a shallower power state (lower in number). The HW will decide
735 * when to actually enter a given state on an on-demand basis, for instance
736 * depending on the active state of display pipes. The state of display
737 * registers backed by affected power rails are saved/restored as needed.
739 * Based on the above enabling a deeper DC power state is asynchronous wrt.
740 * enabling it. Disabling a deeper power state is synchronous: for instance
741 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
742 * back on and register state is restored. This is guaranteed by the MMIO write
743 * to DC_STATE_EN blocking until the state is restored.
745 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
750 if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
751 state &= dev_priv->csr.allowed_dc_mask;
753 val = I915_READ(DC_STATE_EN);
754 mask = gen9_dc_mask(dev_priv);
755 DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
758 /* Check if DMC is ignoring our DC state requests */
759 if ((val & mask) != dev_priv->csr.dc_state)
760 DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
761 dev_priv->csr.dc_state, val & mask);
766 gen9_write_dc_state(dev_priv, val);
768 dev_priv->csr.dc_state = val & mask;
771 static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
773 assert_can_enable_dc9(dev_priv);
775 DRM_DEBUG_KMS("Enabling DC9\n");
777 * Power sequencer reset is not needed on
778 * platforms with South Display Engine on PCH,
779 * because PPS registers are always on.
781 if (!HAS_PCH_SPLIT(dev_priv))
782 intel_power_sequencer_reset(dev_priv);
783 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
786 static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
788 assert_can_disable_dc9(dev_priv);
790 DRM_DEBUG_KMS("Disabling DC9\n");
792 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
794 intel_pps_unlock_regs_wa(dev_priv);
797 static void assert_csr_loaded(struct drm_i915_private *dev_priv)
799 WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
800 "CSR program storage start is NULL\n");
801 WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
802 WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
805 static struct i915_power_well *
806 lookup_power_well(struct drm_i915_private *dev_priv,
807 enum i915_power_well_id power_well_id)
809 struct i915_power_well *power_well;
811 for_each_power_well(dev_priv, power_well)
812 if (power_well->desc->id == power_well_id)
816 * It's not feasible to add error checking code to the callers since
817 * this condition really shouldn't happen and it doesn't even make sense
818 * to abort things like display initialization sequences. Just return
819 * the first power well and hope the WARN gets reported so we can fix
822 WARN(1, "Power well %d not defined for this platform\n", power_well_id);
823 return &dev_priv->power_domains.power_wells[0];
826 static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
828 bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
831 WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
833 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
834 "DC5 already programmed to be enabled.\n");
835 assert_rpm_wakelock_held(&dev_priv->runtime_pm);
837 assert_csr_loaded(dev_priv);
840 static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
842 assert_can_enable_dc5(dev_priv);
844 DRM_DEBUG_KMS("Enabling DC5\n");
846 /* Wa Display #1183: skl,kbl,cfl */
847 if (IS_GEN9_BC(dev_priv))
848 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
849 SKL_SELECT_ALTERNATE_DC_EXIT);
851 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
854 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
856 WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
857 "Backlight is not disabled.\n");
858 WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
859 "DC6 already programmed to be enabled.\n");
861 assert_csr_loaded(dev_priv);
864 static void skl_enable_dc6(struct drm_i915_private *dev_priv)
866 assert_can_enable_dc6(dev_priv);
868 DRM_DEBUG_KMS("Enabling DC6\n");
870 /* Wa Display #1183: skl,kbl,cfl */
871 if (IS_GEN9_BC(dev_priv))
872 I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
873 SKL_SELECT_ALTERNATE_DC_EXIT);
875 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
878 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
879 struct i915_power_well *power_well)
881 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
882 int pw_idx = power_well->desc->hsw.idx;
883 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
884 u32 bios_req = I915_READ(regs->bios);
886 /* Take over the request bit if set by BIOS. */
887 if (bios_req & mask) {
888 u32 drv_req = I915_READ(regs->driver);
890 if (!(drv_req & mask))
891 I915_WRITE(regs->driver, drv_req | mask);
892 I915_WRITE(regs->bios, bios_req & ~mask);
896 static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
897 struct i915_power_well *power_well)
899 bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
902 static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
903 struct i915_power_well *power_well)
905 bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
908 static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
909 struct i915_power_well *power_well)
911 return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
914 static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
916 struct i915_power_well *power_well;
918 power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
919 if (power_well->count > 0)
920 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
922 power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
923 if (power_well->count > 0)
924 bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
926 if (IS_GEMINILAKE(dev_priv)) {
927 power_well = lookup_power_well(dev_priv,
928 GLK_DISP_PW_DPIO_CMN_C);
929 if (power_well->count > 0)
930 bxt_ddi_phy_verify_state(dev_priv,
931 power_well->desc->bxt.phy);
935 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
936 struct i915_power_well *power_well)
938 return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
941 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
943 u32 tmp = I915_READ(DBUF_CTL);
945 WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
946 (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
947 "Unexpected DBuf power power state (0x%08x)\n", tmp);
950 static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
952 struct intel_cdclk_state cdclk_state = {};
954 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
956 dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
957 /* Can't read out voltage_level so can't use intel_cdclk_changed() */
958 WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));
960 gen9_assert_dbuf_enabled(dev_priv);
962 if (IS_GEN9_LP(dev_priv))
963 bxt_verify_ddi_phy_power_wells(dev_priv);
965 if (INTEL_GEN(dev_priv) >= 11)
967 * DMC retains HW context only for port A, the other combo
968 * PHY's HW context for port B is lost after DC transitions,
969 * so we need to restore it manually.
971 intel_combo_phy_init(dev_priv);
974 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
975 struct i915_power_well *power_well)
977 gen9_disable_dc_states(dev_priv);
980 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
981 struct i915_power_well *power_well)
983 if (!dev_priv->csr.dmc_payload)
986 if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
987 skl_enable_dc6(dev_priv);
988 else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
989 gen9_enable_dc5(dev_priv);
992 static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
993 struct i915_power_well *power_well)
997 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
998 struct i915_power_well *power_well)
1002 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
1003 struct i915_power_well *power_well)
1008 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
1009 struct i915_power_well *power_well)
1011 if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
1012 i830_enable_pipe(dev_priv, PIPE_A);
1013 if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
1014 i830_enable_pipe(dev_priv, PIPE_B);
1017 static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
1018 struct i915_power_well *power_well)
1020 i830_disable_pipe(dev_priv, PIPE_B);
1021 i830_disable_pipe(dev_priv, PIPE_A);
1024 static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
1025 struct i915_power_well *power_well)
1027 return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
1028 I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
1031 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
1032 struct i915_power_well *power_well)
1034 if (power_well->count > 0)
1035 i830_pipes_power_well_enable(dev_priv, power_well);
1037 i830_pipes_power_well_disable(dev_priv, power_well);
1040 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
1041 struct i915_power_well *power_well, bool enable)
1043 int pw_idx = power_well->desc->vlv.idx;
1048 mask = PUNIT_PWRGT_MASK(pw_idx);
1049 state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
1050 PUNIT_PWRGT_PWR_GATE(pw_idx);
1052 vlv_punit_get(dev_priv);
1055 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
1060 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
1063 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
1065 if (wait_for(COND, 100))
1066 DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1068 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
1073 vlv_punit_put(dev_priv);
1076 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
1077 struct i915_power_well *power_well)
1079 vlv_set_power_well(dev_priv, power_well, true);
1082 static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
1083 struct i915_power_well *power_well)
1085 vlv_set_power_well(dev_priv, power_well, false);
1088 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
1089 struct i915_power_well *power_well)
1091 int pw_idx = power_well->desc->vlv.idx;
1092 bool enabled = false;
1097 mask = PUNIT_PWRGT_MASK(pw_idx);
1098 ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
1100 vlv_punit_get(dev_priv);
1102 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
1104 * We only ever set the power-on and power-gate states, anything
1105 * else is unexpected.
1107 WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
1108 state != PUNIT_PWRGT_PWR_GATE(pw_idx));
1113 * A transient state at this point would mean some unexpected party
1114 * is poking at the power controls too.
1116 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
1117 WARN_ON(ctrl != state);
1119 vlv_punit_put(dev_priv);
1124 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
1129 * On driver load, a pipe may be active and driving a DSI display.
1130 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
1131 * (and never recovering) in this case. intel_dsi_post_disable() will
1132 * clear it when we turn off the display.
1134 val = I915_READ(DSPCLK_GATE_D);
1135 val &= DPOUNIT_CLOCK_GATE_DISABLE;
1136 val |= VRHUNIT_CLOCK_GATE_DISABLE;
1137 I915_WRITE(DSPCLK_GATE_D, val);
1140 * Disable trickle feed and enable pnd deadline calculation
1142 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
1143 I915_WRITE(CBR1_VLV, 0);
1145 WARN_ON(dev_priv->rawclk_freq == 0);
1147 I915_WRITE(RAWCLK_FREQ_VLV,
1148 DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
/*
 * Display-side (re)initialization performed after the VLV/CHV display
 * power well is powered on: CRI/ref clocks, clock gating, display IRQs,
 * and (outside of driver init) HPD, CRT ADPA, VGA and PPS state.
 */
1151 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1153 struct intel_encoder *encoder;
1157 * Enable the CRI clock source so we can get at the
1158 * display and the reference clock for VGA
1159 * hotplug / manual detection. Supposedly DSI also
1160 * needs the ref clock up and running.
1162 * CHV DPLL B/C have some issues if VGA mode is enabled.
1164 for_each_pipe(dev_priv, pipe) {
1165 u32 val = I915_READ(DPLL(pipe));
1167 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1169 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1171 I915_WRITE(DPLL(pipe), val);
1174 vlv_init_display_clock_gating(dev_priv);
1176 spin_lock_irq(&dev_priv->irq_lock);
1177 valleyview_enable_display_irqs(dev_priv);
1178 spin_unlock_irq(&dev_priv->irq_lock);
1181 * During driver initialization/resume we can avoid restoring the
1182 * part of the HW/SW state that will be inited anyway explicitly.
/* NOTE(review): presumably an early return follows this check,
 * skipping the HPD/CRT/VGA/PPS restore below — confirm upstream. */
1184 if (dev_priv->power_domains.initializing)
1187 intel_hpd_init(dev_priv);
1189 /* Re-enable the ADPA, if we have one */
1190 for_each_intel_encoder(&dev_priv->drm, encoder) {
1191 if (encoder->type == INTEL_OUTPUT_ANALOG)
1192 intel_crt_reset(&encoder->base);
1195 i915_redisable_vga_power_on(dev_priv);
1197 intel_pps_unlock_regs_wa(dev_priv);
/*
 * Display-side teardown performed before the VLV/CHV display power well
 * is powered off: disable and drain display IRQs, reset PPS state,
 * and re-arm connector polling since HPD interrupts will be gone.
 */
1200 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1202 spin_lock_irq(&dev_priv->irq_lock);
1203 valleyview_disable_display_irqs(dev_priv);
1204 spin_unlock_irq(&dev_priv->irq_lock);
1206 /* make sure we're done processing display irqs */
1207 intel_synchronize_irq(dev_priv);
1209 intel_power_sequencer_reset(dev_priv);
1211 /* Prevent us from re-enabling polling on accident in late suspend */
1212 if (!dev_priv->drm.dev->power.is_suspended)
1213 intel_hpd_poll_init(dev_priv);
/*
 * Enable the VLV display power well, then run the display-side init;
 * the well must be up before touching the display registers.
 */
1216 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
1217 struct i915_power_well *power_well)
1219 vlv_set_power_well(dev_priv, power_well, true);
1221 vlv_display_power_well_init(dev_priv);
/*
 * Mirror of vlv_display_power_well_enable(): tear down display-side
 * state first, then power-gate the well.
 */
1224 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
1225 struct i915_power_well *power_well)
1227 vlv_display_power_well_deinit(dev_priv);
1229 vlv_set_power_well(dev_priv, power_well, false);
/*
 * Enable the VLV DPIO common lane power well and then de-assert the
 * PHY common lane reset. Ordering matters: ref/CRI clock up, delay,
 * power well on, then cmnreset released.
 */
1232 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1233 struct i915_power_well *power_well)
1235 /* since ref/cri clock was enabled */
1236 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1238 vlv_set_power_well(dev_priv, power_well, true);
1241 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1242 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
1243 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
1244 * b. The other bits such as sfr settings / modesel may all
1247 * This should only be done on init and resume from S3 with
1248 * both PLLs disabled, or we risk losing DPIO and PLL
/* De-assert common lane reset (DPIO_CMNRST set = out of reset) */
1251 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
/*
 * Disable the VLV DPIO common lane power well: verify all PLLs are off,
 * assert common lane reset, then power-gate the well.
 */
1254 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1255 struct i915_power_well *power_well)
/* DPIO must be quiescent before asserting cmnreset */
1259 for_each_pipe(dev_priv, pipe)
1260 assert_pll_disabled(dev_priv, pipe)
1262 /* Assert common reset */
1263 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
1265 vlv_set_power_well(dev_priv, power_well, false);
/* Bitmask covering every defined power domain */
1268 #define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
/* True iff all of @bits are set in @val */
1270 #define BITS_SET(val, bits) (((val) & (bits)) == (bits))
/*
 * Sanity check the CHV PHY hardware status: reconstruct the expected
 * DISPLAY_PHY_STATUS value from the cached chv_phy_control (plus the
 * common lane power well states) and wait for the hardware to match.
 * Asserts can be masked off per-PHY until the PHY has been fully reset
 * (see chv_phy_assert[]), since the BIOS may leave it half powered.
 */
1272 static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1274 struct i915_power_well *cmn_bc =
1275 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
1276 struct i915_power_well *cmn_d =
1277 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
1278 u32 phy_control = dev_priv->chv_phy_control;
1280 u32 phy_status_mask = 0xffffffff;
1283 * The BIOS can leave the PHY is some weird state
1284 * where it doesn't fully power down some parts.
1285 * Disable the asserts until the PHY has been fully
1286 * reset (ie. the power well has been disabled at
/* PHY0 (channels B/C) asserts masked until first full reset */
1289 if (!dev_priv->chv_phy_assert[DPIO_PHY0])
1290 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
1291 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
1292 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
1293 PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
1294 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
1295 PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
/* PHY1 (channel D) has a single channel */
1297 if (!dev_priv->chv_phy_assert[DPIO_PHY1])
1298 phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
1299 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
1300 PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
1302 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
1303 phy_status |= PHY_POWERGOOD(DPIO_PHY0);
1305 /* this assumes override is only used to enable lanes */
1306 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
1307 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
1309 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
1310 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
1312 /* CL1 is on whenever anything is on in either channel */
1313 if (BITS_SET(phy_control,
1314 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
1315 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
1316 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
1319 * The DPLLB check accounts for the pipe B + port A usage
1320 * with CL2 powered up but all the lanes in the second channel
1323 if (BITS_SET(phy_control,
1324 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
1325 (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
1326 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
/* Per-spline LDO status: lanes 0/1 -> 0x3, lanes 2/3 -> 0xc */
1328 if (BITS_SET(phy_control,
1329 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
1330 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
1331 if (BITS_SET(phy_control,
1332 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
1333 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
1335 if (BITS_SET(phy_control,
1336 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
1337 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
1338 if (BITS_SET(phy_control,
1339 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
1340 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
1343 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
1344 phy_status |= PHY_POWERGOOD(DPIO_PHY1);
1346 /* this assumes override is only used to enable lanes */
1347 if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
1348 phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
1350 if (BITS_SET(phy_control,
1351 PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
1352 phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
1354 if (BITS_SET(phy_control,
1355 PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
1356 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
1357 if (BITS_SET(phy_control,
1358 PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
1359 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
1362 phy_status &= phy_status_mask;
1365 * The PHY may be busy with some initial calibration and whatnot,
1366 * so the power state can take a while to actually change.
1368 if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
1369 phy_status_mask, phy_status, 10))
1370 DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1371 I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
1372 phy_status, dev_priv->chv_phy_control);
/*
 * Enable a CHV DPIO common lane power well (BC or D PHY): power the
 * well, poll for PHY power-good, enable dynamic power down in the
 * common lane registers, then de-assert the PHY common lane reset.
 * The register write ordering is part of the documented bring-up
 * sequence — do not reorder.
 */
1377 static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1378 struct i915_power_well *power_well)
1384 WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1385 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
/* Map the well id to its PHY/pipe (BC -> PHY0, D -> PHY1); the
 * assignments themselves are on lines elided from this view. */
1387 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1395 /* since ref/cri clock was enabled */
1396 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
1397 vlv_set_power_well(dev_priv, power_well, true);
1399 /* Poll for phypwrgood signal */
1400 if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
1401 PHY_POWERGOOD(phy), 1))
1402 DRM_ERROR("Display PHY %d is not power up\n", phy);
1404 vlv_dpio_get(dev_priv);
1406 /* Enable dynamic power down */
1407 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
1408 tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
1409 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
1410 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
/* The BC PHY has a second channel; PHY1 (D) does not */
1412 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1413 tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
1414 tmp |= DPIO_DYNPWRDOWNEN_CH1;
1415 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
1418 * Force the non-existing CL2 off. BXT does this
1419 * too, so maybe it saves some power even though
1420 * CL2 doesn't exist?
1422 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
1423 tmp |= DPIO_CL2_LDOFUSE_PWRENB;
1424 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
1427 vlv_dpio_put(dev_priv);
/* Release the PHY common lane reset and latch it in our cached state */
1429 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
1430 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1432 DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1433 phy, dev_priv->chv_phy_control);
1435 assert_chv_phy_status(dev_priv);
/*
 * Disable a CHV DPIO common lane power well: verify the relevant PLLs
 * are off, assert the PHY common lane reset, power-gate the well, and
 * re-arm the PHY state asserts now that the PHY is fully reset.
 */
1438 static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
1439 struct i915_power_well *power_well)
1443 WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
1444 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D)
/* BC PHY serves pipes A/B, D PHY serves pipe C */
1446 if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
1448 assert_pll_disabled(dev_priv, PIPE_A);
1449 assert_pll_disabled(dev_priv, PIPE_B);
1452 assert_pll_disabled(dev_priv, PIPE_C);
1455 dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
1456 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1458 vlv_set_power_well(dev_priv, power_well, false);
1460 DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
1461 phy, dev_priv->chv_phy_control);
1463 /* PHY is fully reset now, so we can enable the PHY state asserts */
1464 dev_priv->chv_phy_assert[phy] = true;
1466 assert_chv_phy_status(dev_priv);
/*
 * Verify the DPIO lane power-down status bits for @phy/@ch match what
 * the lane override (@override, @mask) implies. Reads the channel's
 * common lane register and compares ALLDL/ANYDL power-down flags
 * against the expectation derived from the override mask.
 */
1469 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1470 enum dpio_channel ch, bool override, unsigned int mask)
1472 enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
1473 u32 reg, val, expected, actual;
1476 * The BIOS can leave the PHY is some weird state
1477 * where it doesn't fully power down some parts.
1478 * Disable the asserts until the PHY has been fully
1479 * reset (ie. the power well has been disabled at
1482 if (!dev_priv->chv_phy_assert[phy])
/* Pick the common lane register for the channel being checked */
1486 reg = _CHV_CMN_DW0_CH0;
1488 reg = _CHV_CMN_DW6_CH1;
1490 vlv_dpio_get(dev_priv);
1491 val = vlv_dpio_read(dev_priv, pipe, reg);
1492 vlv_dpio_put(dev_priv);
1495 * This assumes !override is only used when the port is disabled.
1496 * All lanes should power down even without the override when
1497 * the port is disabled.
1499 if (!override || mask == 0xf) {
1500 expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1502 * If CH1 common lane is not active anymore
1503 * (eg. for pipe B DPLL) the entire channel will
1504 * shut down, which causes the common lane registers
1505 * to read as 0. That means we can't actually check
1506 * the lane power down status bits, but as the entire
1507 * register reads as 0 it's a good indication that the
1508 * channel is indeed entirely powered down.
1510 if (ch == DPIO_CH1 && val == 0)
1512 } else if (mask != 0x0) {
/* Partial lane mask: only "any lane down" is guaranteed */
1513 expected = DPIO_ANYDL_POWERDOWN;
/* Status bits sit at different shifts per channel */
1519 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
1521 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
1522 actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
1524 WARN(actual != expected,
1525 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
1526 !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
1527 !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
/*
 * Enable or disable the per-channel lane power-down override for
 * @phy/@ch, updating DISPLAY_PHY_CONTROL under the power domains lock.
 * Returns whether the override was previously enabled so the caller
 * can restore it.
 */
1531 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1532 enum dpio_channel ch, bool override)
1534 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1537 mutex_lock(&power_domains->lock);
1539 was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
/* No change requested: skip the register write (early-out path) */
1541 if (override == was_override)
1545 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1547 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1549 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1551 DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
1552 phy, ch, dev_priv->chv_phy_control);
1554 assert_chv_phy_status(dev_priv);
1557 mutex_unlock(&power_domains->lock);
1559 return was_override;
/*
 * Program the per-lane power-down override mask for the encoder's
 * PHY channel. @mask selects which of the 4 lanes stay powered;
 * @override enables/disables the override mechanism itself. Holds the
 * power domains lock across the PHY_CONTROL update and the asserts.
 */
1562 void chv_phy_powergate_lanes(struct intel_encoder *encoder,
1563 bool override, unsigned int mask)
1565 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1566 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1567 enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base));
1568 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1570 mutex_lock(&power_domains->lock);
/* Replace the old lane mask with the new one */
1572 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
1573 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
1576 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1578 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
1580 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
1582 DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
1583 phy, ch, mask, dev_priv->chv_phy_control);
1585 assert_chv_phy_status(dev_priv);
1587 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
1589 mutex_unlock(&power_domains->lock);
/*
 * Power well .is_enabled hook for the CHV pipe-A well: read the pipe
 * subsystem power state from the Punit DSPSSPM register.
 */
1592 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
1593 struct i915_power_well *power_well)
/* Only pipe A has a Punit-controlled power well on CHV */
1595 enum pipe pipe = PIPE_A;
1599 vlv_punit_get(dev_priv);
1601 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
1603 * We only ever set the power-on and power-gate states, anything
1604 * else is unexpected.
1606 WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
1607 enabled = state == DP_SSS_PWR_ON(pipe);
1610 * A transient state at this point would mean some unexpected party
1611 * is poking at the power controls too.
1613 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
/* Control bits sit 16 bits below the status bits in DSPSSPM */
1614 WARN_ON(ctrl << 16 != state);
1616 vlv_punit_put(dev_priv);
/*
 * Request the CHV pipe-A power well on/off via the Punit DSPSSPM
 * register and wait for the status field to reflect the request.
 * A local COND macro (definition elided from this view) expresses
 * "status matches requested state".
 */
1621 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
1622 struct i915_power_well *power_well,
1625 enum pipe pipe = PIPE_A;
1629 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
1631 vlv_punit_get(dev_priv);
1634 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
/* Write the request into the control field, preserving other bits */
1639 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
1640 ctrl &= ~DP_SSC_MASK(pipe);
1641 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
1642 vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
/* 100 ms is generous; the Punit normally responds much faster */
1644 if (wait_for(COND, 100))
1645 DRM_ERROR("timeout setting power well state %08x (%08x)\n",
1647 vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
1652 vlv_punit_put(dev_priv);
/*
 * Enable the CHV pipe-A power well, then perform the shared VLV/CHV
 * display-side init (the well must be up first).
 */
1655 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
1656 struct i915_power_well *power_well)
1658 chv_set_pipe_power_well(dev_priv, power_well, true);
1660 vlv_display_power_well_init(dev_priv);
/*
 * Mirror of chv_pipe_power_well_enable(): display-side teardown first,
 * then power-gate the pipe-A well.
 */
1663 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1664 struct i915_power_well *power_well)
1666 vlv_display_power_well_deinit(dev_priv);
1668 chv_set_pipe_power_well(dev_priv, power_well, false);
/*
 * Union of both async-put domain masks: all domains with a pending
 * asynchronous reference release (no sanity checking — see
 * async_put_domains_mask() for the checked variant).
 */
1671 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
1673 return power_domains->async_put_domains[0] |
1674 power_domains->async_put_domains[1];
1677 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/*
 * Debug-only consistency checks for the async power-domain-put state;
 * stubbed out below when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is disabled.
 */
/* The two async-put masks must never share a domain bit */
1680 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1682 return !WARN_ON(power_domains->async_put_domains[0] &
1683 power_domains->async_put_domains[1]);
/* Full invariant check: wakeref held iff domains pending, and each
 * pending domain holds exactly one reference */
1687 __async_put_domains_state_ok(struct i915_power_domains *power_domains)
1689 enum intel_display_power_domain domain;
1692 err |= !assert_async_put_domain_masks_disjoint(power_domains);
1693 err |= WARN_ON(!!power_domains->async_put_wakeref !=
1694 !!__async_put_domains_mask(power_domains));
1696 for_each_power_domain(domain, __async_put_domains_mask(power_domains))
1697 err |= WARN_ON(power_domains->domain_use_count[domain] != 1);
/* Dump the use counts for every domain in @mask, for debugging */
1702 static void print_power_domains(struct i915_power_domains *power_domains,
1703 const char *prefix, u64 mask)
1705 enum intel_display_power_domain domain;
1707 DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
1708 for_each_power_domain(domain, mask)
1709 DRM_DEBUG_DRIVER("%s use_count %d\n",
1710 intel_display_power_domain_str(domain),
1711 power_domains->domain_use_count[domain]);
/* Dump the complete async-put state (wakeref + both masks) */
1715 print_async_put_domains_state(struct i915_power_domains *power_domains)
1717 DRM_DEBUG_DRIVER("async_put_wakeref %u\n",
1718 power_domains->async_put_wakeref);
1720 print_power_domains(power_domains, "async_put_domains[0]",
1721 power_domains->async_put_domains[0]);
1722 print_power_domains(power_domains, "async_put_domains[1]",
1723 power_domains->async_put_domains[1]);
/* Check invariants and dump state on violation */
1727 verify_async_put_domains_state(struct i915_power_domains *power_domains)
1729 if (!__async_put_domains_state_ok(power_domains))
1730 print_async_put_domains_state(power_domains);
/* !CONFIG_DRM_I915_DEBUG_RUNTIME_PM: no-op stubs */
1736 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
1741 verify_async_put_domains_state(struct i915_power_domains *power_domains)
1745 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
/*
 * Checked accessor for the combined async-put domain mask: asserts the
 * two masks are disjoint before returning their union.
 */
1747 static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
1749 assert_async_put_domain_masks_disjoint(power_domains);
1751 return __async_put_domains_mask(power_domains);
/*
 * Remove @domain from both async-put masks (it can be in at most one;
 * clearing both is safe and avoids having to know which).
 */
1755 async_put_domains_clear_domain(struct i915_power_domains *power_domains,
1756 enum intel_display_power_domain domain)
1758 assert_async_put_domain_masks_disjoint(power_domains);
1760 power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
1761 power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
/*
 * If @domain has a pending async put, take over that still-held
 * reference instead of acquiring a new one. When the last pending
 * domain is grabbed, cancel the async work and drop its raw wakeref.
 * Returns whether a pending reference was grabbed (return statements
 * are on lines elided from this view). Caller holds power_domains->lock.
 */
1765 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
1766 enum intel_display_power_domain domain)
1768 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1771 if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
1774 async_put_domains_clear_domain(power_domains, domain);
/* Other domains still pending: leave the work and wakeref in place */
1778 if (async_put_domains_mask(power_domains))
1781 cancel_delayed_work(&power_domains->async_put_work);
1782 intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
1783 fetch_and_zero(&power_domains->async_put_wakeref));
1785 verify_async_put_domains_state(power_domains);
/*
 * Grab a reference on @domain: either adopt a pending async-put
 * reference, or enable every power well that backs the domain.
 * Caller holds power_domains->lock.
 */
1791 __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1792 enum intel_display_power_domain domain)
1794 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1795 struct i915_power_well *power_well;
1797 if (intel_display_power_grab_async_put_ref(dev_priv, domain))
1800 for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
1801 intel_power_well_get(dev_priv, power_well);
1803 power_domains->domain_use_count[domain]++;
1807 * intel_display_power_get - grab a power domain reference
1808 * @dev_priv: i915 device instance
1809 * @domain: power domain to reference
1811 * This function grabs a power domain reference for @domain and ensures that the
1812 * power domain and all its parents are powered up. Therefore users should only
1813 * grab a reference to the innermost power domain they need.
1815 * Any power domain reference obtained by this function must have a symmetric
1816 * call to intel_display_power_put() to release the reference again.
/* Returns the runtime-PM wakeref acquired alongside the domain ref;
 * the return statement is on a line elided from this view. */
1818 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
1819 enum intel_display_power_domain domain)
1821 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1822 intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1824 mutex_lock(&power_domains->lock);
1825 __intel_display_power_get_domain(dev_priv, domain);
1826 mutex_unlock(&power_domains->lock);
1832 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1833 * @dev_priv: i915 device instance
1834 * @domain: power domain to reference
1836 * This function grabs a power domain reference for @domain and ensures that the
1837 * power domain and all its parents are powered up. Therefore users should only
1838 * grab a reference to the innermost power domain they need.
1840 * Any power domain reference obtained by this function must have a symmetric
1841 * call to intel_display_power_put() to release the reference again.
/* Conditional variant: only takes the reference if the domain is
 * already enabled; otherwise drops the runtime-PM ref and fails. */
1844 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1845 enum intel_display_power_domain domain)
1847 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1848 intel_wakeref_t wakeref;
/* Bail if the device itself is runtime suspended */
1851 wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
1855 mutex_lock(&power_domains->lock);
1857 if (__intel_display_power_is_enabled(dev_priv, domain)) {
1858 __intel_display_power_get_domain(dev_priv, domain);
1864 mutex_unlock(&power_domains->lock);
/* Failure path: release the runtime-PM reference taken above */
1867 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
/*
 * Drop a reference on @domain: decrement its use count and release
 * every backing power well (in reverse order, so wells power down
 * innermost-first). Caller holds power_domains->lock.
 */
1875 __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
1876 enum intel_display_power_domain domain)
1878 struct i915_power_domains *power_domains;
1879 struct i915_power_well *power_well;
1880 const char *name = intel_display_power_domain_str(domain);
1882 power_domains = &dev_priv->power_domains;
/* Catch unbalanced puts and puts racing with a pending async put */
1884 WARN(!power_domains->domain_use_count[domain],
1885 "Use count on domain %s is already zero\n",
1887 WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain),
1888 "Async disabling of domain %s is pending\n",
1891 power_domains->domain_use_count[domain]--;
1893 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
1894 intel_power_well_put(dev_priv, power_well);
/*
 * Locked wrapper around __intel_display_power_put_domain().
 */
1897 static void __intel_display_power_put(struct drm_i915_private *dev_priv,
1898 enum intel_display_power_domain domain)
1900 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1902 mutex_lock(&power_domains->lock);
1903 __intel_display_power_put_domain(dev_priv, domain);
1904 mutex_unlock(&power_domains->lock);
1908 * intel_display_power_put_unchecked - release an unchecked power domain reference
1909 * @dev_priv: i915 device instance
1910 * @domain: power domain to reference
1912 * This function drops the power domain reference obtained by
1913 * intel_display_power_get() and might power down the corresponding hardware
1914 * block right away if this is the last reference.
1916 * This function exists only for historical reasons and should be avoided in
1917 * new code, as the correctness of its use cannot be checked. Always use
1918 * intel_display_power_put() instead.
1920 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
1921 enum intel_display_power_domain domain)
1923 __intel_display_power_put(dev_priv, domain);
/* Unchecked variant: drops the runtime-PM ref without a wakeref cookie */
1924 intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
/*
 * Stash @wakeref (kept until the queued work runs) and schedule the
 * async-put worker with a 100 ms grace period. Caller holds
 * power_domains->lock; there must not already be a pending wakeref.
 */
1928 queue_async_put_domains_work(struct i915_power_domains *power_domains,
1929 intel_wakeref_t wakeref)
1931 WARN_ON(power_domains->async_put_wakeref);
1932 power_domains->async_put_wakeref = wakeref;
1933 WARN_ON(!queue_delayed_work(system_unbound_wq,
1934 &power_domains->async_put_work,
1935 msecs_to_jiffies(100)));
/*
 * Release every domain reference in @mask that was put asynchronously.
 * Caller holds power_domains->lock and a raw wakeref.
 */
1939 release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
1941 struct drm_i915_private *dev_priv =
1942 container_of(power_domains, struct drm_i915_private,
1944 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
1945 enum intel_display_power_domain domain;
1946 intel_wakeref_t wakeref;
1949 * The caller must hold already raw wakeref, upgrade that to a proper
1950 * wakeref to make the state checker happy about the HW access during
1951 * power well disabling.
1953 assert_rpm_raw_wakeref_held(rpm);
1954 wakeref = intel_runtime_pm_get(rpm);
1956 for_each_power_domain(domain, mask) {
1957 /* Clear before put, so put's sanity check is happy. */
1958 async_put_domains_clear_domain(power_domains, domain);
1959 __intel_display_power_put_domain(dev_priv, domain);
1962 intel_runtime_pm_put(rpm, wakeref);
/*
 * Delayed worker that releases domains queued by
 * __intel_display_power_put_async(). Releases async_put_domains[0];
 * if more puts accumulated in [1] meanwhile, promotes them to [0]
 * and requeues itself, handing over a fresh raw wakeref.
 */
1966 intel_display_power_put_async_work(struct work_struct *work)
1968 struct drm_i915_private *dev_priv =
1969 container_of(work, struct drm_i915_private,
1970 power_domains.async_put_work.work);
1971 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1972 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
/* Raw wakeref guaranteeing HW access for the whole handler */
1973 intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
1974 intel_wakeref_t old_work_wakeref = 0;
1976 mutex_lock(&power_domains->lock);
1979 * Bail out if all the domain refs pending to be released were grabbed
1980 * by subsequent gets or a flush_work.
1982 old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
1983 if (!old_work_wakeref)
1986 release_async_put_domains(power_domains,
1987 power_domains->async_put_domains[0]);
1989 /* Requeue the work if more domains were async put meanwhile. */
1990 if (power_domains->async_put_domains[1]) {
1991 power_domains->async_put_domains[0] =
1992 fetch_and_zero(&power_domains->async_put_domains[1]);
1993 queue_async_put_domains_work(power_domains,
1994 fetch_and_zero(&new_work_wakeref));
1998 verify_async_put_domains_state(power_domains);
2000 mutex_unlock(&power_domains->lock);
/* Drop whichever wakerefs were not handed over above */
2002 if (old_work_wakeref)
2003 intel_runtime_pm_put_raw(rpm, old_work_wakeref);
2004 if (new_work_wakeref)
2005 intel_runtime_pm_put_raw(rpm, new_work_wakeref);
2009 * intel_display_power_put_async - release a power domain reference asynchronously
2010 * @i915: i915 device instance
2011 * @domain: power domain to reference
2012 * @wakeref: wakeref acquired for the reference that is being released
2014 * This function drops the power domain reference obtained by
2015 * intel_display_power_get*() and schedules a work to power down the
2016 * corresponding hardware block if this is the last reference.
2018 void __intel_display_power_put_async(struct drm_i915_private *i915,
2019 enum intel_display_power_domain domain,
2020 intel_wakeref_t wakeref)
2022 struct i915_power_domains *power_domains = &i915->power_domains;
2023 struct intel_runtime_pm *rpm = &i915->runtime_pm;
/* Raw wakeref to hand over to the delayed worker if we queue it */
2024 intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
2026 mutex_lock(&power_domains->lock);
/* Not the last reference: release synchronously, nothing to defer */
2028 if (power_domains->domain_use_count[domain] > 1) {
2029 __intel_display_power_put_domain(i915, domain);
2034 WARN_ON(power_domains->domain_use_count[domain] != 1);
2036 /* Let a pending work requeue itself or queue a new one. */
2037 if (power_domains->async_put_wakeref) {
2038 power_domains->async_put_domains[1] |= BIT_ULL(domain);
2040 power_domains->async_put_domains[0] |= BIT_ULL(domain);
2041 queue_async_put_domains_work(power_domains,
2042 fetch_and_zero(&work_wakeref));
2046 verify_async_put_domains_state(power_domains);
2048 mutex_unlock(&power_domains->lock);
/* Drop the work wakeref if it was not handed to the worker */
2051 intel_runtime_pm_put_raw(rpm, work_wakeref);
/* The caller's wakeref is always released here; the domain ref may
 * live on until the worker runs */
2053 intel_runtime_pm_put(rpm, wakeref);
2057 * intel_display_power_flush_work - flushes the async display power disabling work
2058 * @i915: i915 device instance
2060 * Flushes any pending work that was scheduled by a preceding
2061 * intel_display_power_put_async() call, completing the disabling of the
2062 * corresponding power domains.
2064 * Note that the work handler function may still be running after this
2065 * function returns; to ensure that the work handler isn't running use
2066 * intel_display_power_flush_work_sync() instead.
2068 void intel_display_power_flush_work(struct drm_i915_private *i915)
2070 struct i915_power_domains *power_domains = &i915->power_domains;
2071 intel_wakeref_t work_wakeref;
2073 mutex_lock(&power_domains->lock);
/* Steal the pending wakeref; if none, the worker already ran or
 * nothing was queued */
2075 work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
2079 release_async_put_domains(power_domains,
2080 async_put_domains_mask(power_domains));
2081 cancel_delayed_work(&power_domains->async_put_work);
2084 verify_async_put_domains_state(power_domains);
2086 mutex_unlock(&power_domains->lock);
2089 intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
2093 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
2094 * @i915: i915 device instance
2096 * Like intel_display_power_flush_work(), but also ensure that the work
2097 * handler function is not running any more when this function returns.
2100 intel_display_power_flush_work_sync(struct drm_i915_private *i915)
2102 struct i915_power_domains *power_domains = &i915->power_domains;
2104 intel_display_power_flush_work(i915);
/* Synchronous cancel: waits for a running handler to finish */
2105 cancel_delayed_work_sync(&power_domains->async_put_work);
2107 verify_async_put_domains_state(power_domains);
2109 WARN_ON(power_domains->async_put_wakeref);
2112 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
2114 * intel_display_power_put - release a power domain reference
2115 * @dev_priv: i915 device instance
2116 * @domain: power domain to reference
2117 * @wakeref: wakeref acquired for the reference that is being released
2119 * This function drops the power domain reference obtained by
2120 * intel_display_power_get() and might power down the corresponding hardware
2121 * block right away if this is the last reference.
/* Wakeref-tracking variant, only built with runtime-PM debugging */
2123 void intel_display_power_put(struct drm_i915_private *dev_priv,
2124 enum intel_display_power_domain domain,
2125 intel_wakeref_t wakeref)
2127 __intel_display_power_put(dev_priv, domain);
2128 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
/* Domains backed by the i830 pipes power well */
2132 #define I830_PIPES_POWER_DOMAINS ( \
2133 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
2134 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2135 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2136 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2137 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2138 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2139 BIT_ULL(POWER_DOMAIN_INIT))
/* Domains backed by the VLV display power well */
2141 #define VLV_DISPLAY_POWER_DOMAINS ( \
2142 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
2143 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
2144 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2145 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2146 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2147 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2148 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2149 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2150 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2151 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
2152 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
2153 BIT_ULL(POWER_DOMAIN_VGA) | \
2154 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2155 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2156 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2157 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2158 BIT_ULL(POWER_DOMAIN_INIT))
/* Domains backed by the VLV DPIO common lane (ports B/C) power well */
2160 #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
2161 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2162 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2163 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
2164 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2165 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2166 BIT_ULL(POWER_DOMAIN_INIT))
/* Domains backed by the per-lane-pair VLV DPIO TX power wells */
2168 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
2169 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2170 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2171 BIT_ULL(POWER_DOMAIN_INIT))
2173 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
2174 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2175 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2176 BIT_ULL(POWER_DOMAIN_INIT))
2178 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
2179 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2180 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2181 BIT_ULL(POWER_DOMAIN_INIT))
2183 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
2184 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2185 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2186 BIT_ULL(POWER_DOMAIN_INIT))
/* Domains backed by the CHV display power well */
2188 #define CHV_DISPLAY_POWER_DOMAINS ( \
2189 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
2190 BIT_ULL(POWER_DOMAIN_PIPE_A) | \
2191 BIT_ULL(POWER_DOMAIN_PIPE_B) | \
2192 BIT_ULL(POWER_DOMAIN_PIPE_C) | \
2193 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
2194 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
2195 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
2196 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
2197 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
2198 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
2199 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2200 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2201 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2202 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
2203 BIT_ULL(POWER_DOMAIN_VGA) | \
2204 BIT_ULL(POWER_DOMAIN_AUDIO) | \
2205 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2206 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2207 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2208 BIT_ULL(POWER_DOMAIN_GMBUS) | \
2209 BIT_ULL(POWER_DOMAIN_INIT))
2211 #define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
2212 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
2213 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
2214 BIT_ULL(POWER_DOMAIN_AUX_B) | \
2215 BIT_ULL(POWER_DOMAIN_AUX_C) | \
2216 BIT_ULL(POWER_DOMAIN_INIT))
2218 #define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
2219 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
2220 BIT_ULL(POWER_DOMAIN_AUX_D) | \
2221 BIT_ULL(POWER_DOMAIN_INIT))
/* HSW/BDW global display power well domains (everything but pipe A/eDP). */
#define HSW_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define BDW_DISPLAY_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* SKL power well 2, DDI IO and DC-off well domains. */
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* BXT power well 2, DC-off and DPIO common well domains. */
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* GLK power well 2, DDI IO, DPIO common, AUX and DC-off well domains. */
#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define GLK_DPIO_CMN_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_GMBUS) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/* CNL power well 2, DDI IO, AUX and DC-off well domains. */
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_AUX_F) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
2436 * ICL PW_0/PG_0 domains (HW/DMC control):
2438 * - clocks except port PLL
2439 * - central power except FBC
2440 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
2441 * ICL PW_1/PG_1 domains (HW/DMC control):
2443 * - PIPE_A and its planes, except VGA
2444 * - transcoder EDP + PSR
/* ICL nested power well domains: PW_4 is a subset of PW_3. */
#define ICL_PW_4_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define ICL_PW_3_POWER_DOMAINS (		\
	ICL_PW_4_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_AUX_E) |		\
	BIT_ULL(POWER_DOMAIN_AUX_F) |		\
	BIT_ULL(POWER_DOMAIN_AUX_C_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))
/*
 * - KVMR (HW control)
 */
#define ICL_PW_2_POWER_DOMAINS (		\
	ICL_PW_3_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
/*
 * - KVMR (HW control)
 */
#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	ICL_PW_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
/* ICL per-port DDI IO and AUX (legacy TC / TBT) well domains. */
#define ICL_DDI_IO_A_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define ICL_DDI_IO_B_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define ICL_DDI_IO_C_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define ICL_DDI_IO_D_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
#define ICL_DDI_IO_E_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
#define ICL_DDI_IO_F_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))

#define ICL_AUX_A_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A))
#define ICL_AUX_B_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B))
#define ICL_AUX_C_TC1_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C))
#define ICL_AUX_D_TC2_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D))
#define ICL_AUX_E_TC3_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_E))
#define ICL_AUX_F_TC4_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F))
#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
/* TGL nested power well domains: PW_5 ⊂ PW_4 ⊂ PW_3 ⊂ PW_2, plus IO/AUX wells. */
#define TGL_PW_5_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_PIPE_D) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_4_POWER_DOMAINS (		\
	TGL_PW_5_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_3_POWER_DOMAINS (		\
	TGL_PW_4_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |		\
	BIT_ULL(POWER_DOMAIN_AUX_E) |		\
	BIT_ULL(POWER_DOMAIN_AUX_F) |		\
	BIT_ULL(POWER_DOMAIN_AUX_G) |		\
	BIT_ULL(POWER_DOMAIN_AUX_H) |		\
	BIT_ULL(POWER_DOMAIN_AUX_I) |		\
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_G_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_H_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_I_TBT) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |			\
	BIT_ULL(POWER_DOMAIN_AUDIO) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_2_POWER_DOMAINS (		\
	TGL_PW_3_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	TGL_PW_2_POWER_DOMAINS |		\
	BIT_ULL(POWER_DOMAIN_MODESET) |		\
	BIT_ULL(POWER_DOMAIN_AUX_A) |		\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_DDI_IO_D_TC1_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
#define TGL_DDI_IO_E_TC2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
#define TGL_DDI_IO_F_TC3_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
#define TGL_DDI_IO_G_TC4_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
#define TGL_DDI_IO_H_TC5_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
#define TGL_DDI_IO_I_TC6_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))

#define TGL_AUX_A_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A))
#define TGL_AUX_B_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_B))
#define TGL_AUX_C_IO_POWER_DOMAINS (		\
	BIT_ULL(POWER_DOMAIN_AUX_C))
#define TGL_AUX_D_TC1_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D))
#define TGL_AUX_E_TC2_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_E))
#define TGL_AUX_F_TC3_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F))
#define TGL_AUX_G_TC4_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_G))
#define TGL_AUX_H_TC5_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_H))
#define TGL_AUX_I_TC6_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_I))
#define TGL_AUX_D_TBT1_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
#define TGL_AUX_E_TBT2_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
#define TGL_AUX_F_TBT3_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
#define TGL_AUX_G_TBT4_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
#define TGL_AUX_H_TBT5_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
2627 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
2628 .sync_hw = i9xx_power_well_sync_hw_noop,
2629 .enable = i9xx_always_on_power_well_noop,
2630 .disable = i9xx_always_on_power_well_noop,
2631 .is_enabled = i9xx_always_on_power_well_enabled,
2634 static const struct i915_power_well_ops chv_pipe_power_well_ops = {
2635 .sync_hw = i9xx_power_well_sync_hw_noop,
2636 .enable = chv_pipe_power_well_enable,
2637 .disable = chv_pipe_power_well_disable,
2638 .is_enabled = chv_pipe_power_well_enabled,
2641 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
2642 .sync_hw = i9xx_power_well_sync_hw_noop,
2643 .enable = chv_dpio_cmn_power_well_enable,
2644 .disable = chv_dpio_cmn_power_well_disable,
2645 .is_enabled = vlv_power_well_enabled,
2648 static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
2650 .name = "always-on",
2652 .domains = POWER_DOMAIN_MASK,
2653 .ops = &i9xx_always_on_power_well_ops,
2654 .id = DISP_PW_ID_NONE,
2658 static const struct i915_power_well_ops i830_pipes_power_well_ops = {
2659 .sync_hw = i830_pipes_power_well_sync_hw,
2660 .enable = i830_pipes_power_well_enable,
2661 .disable = i830_pipes_power_well_disable,
2662 .is_enabled = i830_pipes_power_well_enabled,
2665 static const struct i915_power_well_desc i830_power_wells[] = {
2667 .name = "always-on",
2669 .domains = POWER_DOMAIN_MASK,
2670 .ops = &i9xx_always_on_power_well_ops,
2671 .id = DISP_PW_ID_NONE,
2675 .domains = I830_PIPES_POWER_DOMAINS,
2676 .ops = &i830_pipes_power_well_ops,
2677 .id = DISP_PW_ID_NONE,
2681 static const struct i915_power_well_ops hsw_power_well_ops = {
2682 .sync_hw = hsw_power_well_sync_hw,
2683 .enable = hsw_power_well_enable,
2684 .disable = hsw_power_well_disable,
2685 .is_enabled = hsw_power_well_enabled,
2688 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
2689 .sync_hw = i9xx_power_well_sync_hw_noop,
2690 .enable = gen9_dc_off_power_well_enable,
2691 .disable = gen9_dc_off_power_well_disable,
2692 .is_enabled = gen9_dc_off_power_well_enabled,
2695 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
2696 .sync_hw = i9xx_power_well_sync_hw_noop,
2697 .enable = bxt_dpio_cmn_power_well_enable,
2698 .disable = bxt_dpio_cmn_power_well_disable,
2699 .is_enabled = bxt_dpio_cmn_power_well_enabled,
2702 static const struct i915_power_well_regs hsw_power_well_regs = {
2703 .bios = HSW_PWR_WELL_CTL1,
2704 .driver = HSW_PWR_WELL_CTL2,
2705 .kvmr = HSW_PWR_WELL_CTL3,
2706 .debug = HSW_PWR_WELL_CTL4,
2709 static const struct i915_power_well_desc hsw_power_wells[] = {
2711 .name = "always-on",
2713 .domains = POWER_DOMAIN_MASK,
2714 .ops = &i9xx_always_on_power_well_ops,
2715 .id = DISP_PW_ID_NONE,
2719 .domains = HSW_DISPLAY_POWER_DOMAINS,
2720 .ops = &hsw_power_well_ops,
2721 .id = HSW_DISP_PW_GLOBAL,
2723 .hsw.regs = &hsw_power_well_regs,
2724 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2725 .hsw.has_vga = true,
2730 static const struct i915_power_well_desc bdw_power_wells[] = {
2732 .name = "always-on",
2734 .domains = POWER_DOMAIN_MASK,
2735 .ops = &i9xx_always_on_power_well_ops,
2736 .id = DISP_PW_ID_NONE,
2740 .domains = BDW_DISPLAY_POWER_DOMAINS,
2741 .ops = &hsw_power_well_ops,
2742 .id = HSW_DISP_PW_GLOBAL,
2744 .hsw.regs = &hsw_power_well_regs,
2745 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
2746 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2747 .hsw.has_vga = true,
2752 static const struct i915_power_well_ops vlv_display_power_well_ops = {
2753 .sync_hw = i9xx_power_well_sync_hw_noop,
2754 .enable = vlv_display_power_well_enable,
2755 .disable = vlv_display_power_well_disable,
2756 .is_enabled = vlv_power_well_enabled,
2759 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
2760 .sync_hw = i9xx_power_well_sync_hw_noop,
2761 .enable = vlv_dpio_cmn_power_well_enable,
2762 .disable = vlv_dpio_cmn_power_well_disable,
2763 .is_enabled = vlv_power_well_enabled,
2766 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
2767 .sync_hw = i9xx_power_well_sync_hw_noop,
2768 .enable = vlv_power_well_enable,
2769 .disable = vlv_power_well_disable,
2770 .is_enabled = vlv_power_well_enabled,
2773 static const struct i915_power_well_desc vlv_power_wells[] = {
2775 .name = "always-on",
2777 .domains = POWER_DOMAIN_MASK,
2778 .ops = &i9xx_always_on_power_well_ops,
2779 .id = DISP_PW_ID_NONE,
2783 .domains = VLV_DISPLAY_POWER_DOMAINS,
2784 .ops = &vlv_display_power_well_ops,
2785 .id = VLV_DISP_PW_DISP2D,
2787 .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
2791 .name = "dpio-tx-b-01",
2792 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2793 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2794 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2795 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2796 .ops = &vlv_dpio_power_well_ops,
2797 .id = DISP_PW_ID_NONE,
2799 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
2803 .name = "dpio-tx-b-23",
2804 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2805 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2806 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2807 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2808 .ops = &vlv_dpio_power_well_ops,
2809 .id = DISP_PW_ID_NONE,
2811 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
2815 .name = "dpio-tx-c-01",
2816 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2817 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2818 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2819 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2820 .ops = &vlv_dpio_power_well_ops,
2821 .id = DISP_PW_ID_NONE,
2823 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
2827 .name = "dpio-tx-c-23",
2828 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
2829 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
2830 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
2831 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
2832 .ops = &vlv_dpio_power_well_ops,
2833 .id = DISP_PW_ID_NONE,
2835 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
2839 .name = "dpio-common",
2840 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
2841 .ops = &vlv_dpio_cmn_power_well_ops,
2842 .id = VLV_DISP_PW_DPIO_CMN_BC,
2844 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
2849 static const struct i915_power_well_desc chv_power_wells[] = {
2851 .name = "always-on",
2853 .domains = POWER_DOMAIN_MASK,
2854 .ops = &i9xx_always_on_power_well_ops,
2855 .id = DISP_PW_ID_NONE,
2860 * Pipe A power well is the new disp2d well. Pipe B and C
2861 * power wells don't actually exist. Pipe A power well is
2862 * required for any pipe to work.
2864 .domains = CHV_DISPLAY_POWER_DOMAINS,
2865 .ops = &chv_pipe_power_well_ops,
2866 .id = DISP_PW_ID_NONE,
2869 .name = "dpio-common-bc",
2870 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
2871 .ops = &chv_dpio_cmn_power_well_ops,
2872 .id = VLV_DISP_PW_DPIO_CMN_BC,
2874 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
2878 .name = "dpio-common-d",
2879 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
2880 .ops = &chv_dpio_cmn_power_well_ops,
2881 .id = CHV_DISP_PW_DPIO_CMN_D,
2883 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
2888 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
2889 enum i915_power_well_id power_well_id)
2891 struct i915_power_well *power_well;
2894 power_well = lookup_power_well(dev_priv, power_well_id);
2895 ret = power_well->desc->ops->is_enabled(dev_priv, power_well);
2900 static const struct i915_power_well_desc skl_power_wells[] = {
2902 .name = "always-on",
2904 .domains = POWER_DOMAIN_MASK,
2905 .ops = &i9xx_always_on_power_well_ops,
2906 .id = DISP_PW_ID_NONE,
2909 .name = "power well 1",
2910 /* Handled by the DMC firmware */
2913 .ops = &hsw_power_well_ops,
2914 .id = SKL_DISP_PW_1,
2916 .hsw.regs = &hsw_power_well_regs,
2917 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
2918 .hsw.has_fuses = true,
2922 .name = "MISC IO power well",
2923 /* Handled by the DMC firmware */
2926 .ops = &hsw_power_well_ops,
2927 .id = SKL_DISP_PW_MISC_IO,
2929 .hsw.regs = &hsw_power_well_regs,
2930 .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
2935 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
2936 .ops = &gen9_dc_off_power_well_ops,
2937 .id = DISP_PW_ID_NONE,
2940 .name = "power well 2",
2941 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
2942 .ops = &hsw_power_well_ops,
2943 .id = SKL_DISP_PW_2,
2945 .hsw.regs = &hsw_power_well_regs,
2946 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
2947 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
2948 .hsw.has_vga = true,
2949 .hsw.has_fuses = true,
2953 .name = "DDI A/E IO power well",
2954 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
2955 .ops = &hsw_power_well_ops,
2956 .id = DISP_PW_ID_NONE,
2958 .hsw.regs = &hsw_power_well_regs,
2959 .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
2963 .name = "DDI B IO power well",
2964 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
2965 .ops = &hsw_power_well_ops,
2966 .id = DISP_PW_ID_NONE,
2968 .hsw.regs = &hsw_power_well_regs,
2969 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
2973 .name = "DDI C IO power well",
2974 .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
2975 .ops = &hsw_power_well_ops,
2976 .id = DISP_PW_ID_NONE,
2978 .hsw.regs = &hsw_power_well_regs,
2979 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
2983 .name = "DDI D IO power well",
2984 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
2985 .ops = &hsw_power_well_ops,
2986 .id = DISP_PW_ID_NONE,
2988 .hsw.regs = &hsw_power_well_regs,
2989 .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
2994 static const struct i915_power_well_desc bxt_power_wells[] = {
2996 .name = "always-on",
2998 .domains = POWER_DOMAIN_MASK,
2999 .ops = &i9xx_always_on_power_well_ops,
3000 .id = DISP_PW_ID_NONE,
3003 .name = "power well 1",
3004 /* Handled by the DMC firmware */
3007 .ops = &hsw_power_well_ops,
3008 .id = SKL_DISP_PW_1,
3010 .hsw.regs = &hsw_power_well_regs,
3011 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3012 .hsw.has_fuses = true,
3017 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
3018 .ops = &gen9_dc_off_power_well_ops,
3019 .id = DISP_PW_ID_NONE,
3022 .name = "power well 2",
3023 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3024 .ops = &hsw_power_well_ops,
3025 .id = SKL_DISP_PW_2,
3027 .hsw.regs = &hsw_power_well_regs,
3028 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3029 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3030 .hsw.has_vga = true,
3031 .hsw.has_fuses = true,
3035 .name = "dpio-common-a",
3036 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
3037 .ops = &bxt_dpio_cmn_power_well_ops,
3038 .id = BXT_DISP_PW_DPIO_CMN_A,
3040 .bxt.phy = DPIO_PHY1,
3044 .name = "dpio-common-bc",
3045 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
3046 .ops = &bxt_dpio_cmn_power_well_ops,
3047 .id = VLV_DISP_PW_DPIO_CMN_BC,
3049 .bxt.phy = DPIO_PHY0,
3054 static const struct i915_power_well_desc glk_power_wells[] = {
3056 .name = "always-on",
3058 .domains = POWER_DOMAIN_MASK,
3059 .ops = &i9xx_always_on_power_well_ops,
3060 .id = DISP_PW_ID_NONE,
3063 .name = "power well 1",
3064 /* Handled by the DMC firmware */
3067 .ops = &hsw_power_well_ops,
3068 .id = SKL_DISP_PW_1,
3070 .hsw.regs = &hsw_power_well_regs,
3071 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3072 .hsw.has_fuses = true,
3077 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
3078 .ops = &gen9_dc_off_power_well_ops,
3079 .id = DISP_PW_ID_NONE,
3082 .name = "power well 2",
3083 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3084 .ops = &hsw_power_well_ops,
3085 .id = SKL_DISP_PW_2,
3087 .hsw.regs = &hsw_power_well_regs,
3088 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3089 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3090 .hsw.has_vga = true,
3091 .hsw.has_fuses = true,
3095 .name = "dpio-common-a",
3096 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
3097 .ops = &bxt_dpio_cmn_power_well_ops,
3098 .id = BXT_DISP_PW_DPIO_CMN_A,
3100 .bxt.phy = DPIO_PHY1,
3104 .name = "dpio-common-b",
3105 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
3106 .ops = &bxt_dpio_cmn_power_well_ops,
3107 .id = VLV_DISP_PW_DPIO_CMN_BC,
3109 .bxt.phy = DPIO_PHY0,
3113 .name = "dpio-common-c",
3114 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
3115 .ops = &bxt_dpio_cmn_power_well_ops,
3116 .id = GLK_DISP_PW_DPIO_CMN_C,
3118 .bxt.phy = DPIO_PHY2,
3123 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
3124 .ops = &hsw_power_well_ops,
3125 .id = DISP_PW_ID_NONE,
3127 .hsw.regs = &hsw_power_well_regs,
3128 .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3133 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
3134 .ops = &hsw_power_well_ops,
3135 .id = DISP_PW_ID_NONE,
3137 .hsw.regs = &hsw_power_well_regs,
3138 .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3143 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
3144 .ops = &hsw_power_well_ops,
3145 .id = DISP_PW_ID_NONE,
3147 .hsw.regs = &hsw_power_well_regs,
3148 .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3152 .name = "DDI A IO power well",
3153 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
3154 .ops = &hsw_power_well_ops,
3155 .id = DISP_PW_ID_NONE,
3157 .hsw.regs = &hsw_power_well_regs,
3158 .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3162 .name = "DDI B IO power well",
3163 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
3164 .ops = &hsw_power_well_ops,
3165 .id = DISP_PW_ID_NONE,
3167 .hsw.regs = &hsw_power_well_regs,
3168 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3172 .name = "DDI C IO power well",
3173 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
3174 .ops = &hsw_power_well_ops,
3175 .id = DISP_PW_ID_NONE,
3177 .hsw.regs = &hsw_power_well_regs,
3178 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3183 static const struct i915_power_well_desc cnl_power_wells[] = {
3185 .name = "always-on",
3187 .domains = POWER_DOMAIN_MASK,
3188 .ops = &i9xx_always_on_power_well_ops,
3189 .id = DISP_PW_ID_NONE,
3192 .name = "power well 1",
3193 /* Handled by the DMC firmware */
3196 .ops = &hsw_power_well_ops,
3197 .id = SKL_DISP_PW_1,
3199 .hsw.regs = &hsw_power_well_regs,
3200 .hsw.idx = SKL_PW_CTL_IDX_PW_1,
3201 .hsw.has_fuses = true,
3206 .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
3207 .ops = &hsw_power_well_ops,
3208 .id = DISP_PW_ID_NONE,
3210 .hsw.regs = &hsw_power_well_regs,
3211 .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
3216 .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
3217 .ops = &hsw_power_well_ops,
3218 .id = DISP_PW_ID_NONE,
3220 .hsw.regs = &hsw_power_well_regs,
3221 .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
3226 .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
3227 .ops = &hsw_power_well_ops,
3228 .id = DISP_PW_ID_NONE,
3230 .hsw.regs = &hsw_power_well_regs,
3231 .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
3236 .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
3237 .ops = &hsw_power_well_ops,
3238 .id = DISP_PW_ID_NONE,
3240 .hsw.regs = &hsw_power_well_regs,
3241 .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
3246 .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
3247 .ops = &gen9_dc_off_power_well_ops,
3248 .id = DISP_PW_ID_NONE,
3251 .name = "power well 2",
3252 .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
3253 .ops = &hsw_power_well_ops,
3254 .id = SKL_DISP_PW_2,
3256 .hsw.regs = &hsw_power_well_regs,
3257 .hsw.idx = SKL_PW_CTL_IDX_PW_2,
3258 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
3259 .hsw.has_vga = true,
3260 .hsw.has_fuses = true,
3264 .name = "DDI A IO power well",
3265 .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
3266 .ops = &hsw_power_well_ops,
3267 .id = DISP_PW_ID_NONE,
3269 .hsw.regs = &hsw_power_well_regs,
3270 .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
3274 .name = "DDI B IO power well",
3275 .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
3276 .ops = &hsw_power_well_ops,
3277 .id = DISP_PW_ID_NONE,
3279 .hsw.regs = &hsw_power_well_regs,
3280 .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
3284 .name = "DDI C IO power well",
3285 .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
3286 .ops = &hsw_power_well_ops,
3287 .id = DISP_PW_ID_NONE,
3289 .hsw.regs = &hsw_power_well_regs,
3290 .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
3294 .name = "DDI D IO power well",
3295 .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
3296 .ops = &hsw_power_well_ops,
3297 .id = DISP_PW_ID_NONE,
3299 .hsw.regs = &hsw_power_well_regs,
3300 .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
3304 .name = "DDI F IO power well",
3305 .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
3306 .ops = &hsw_power_well_ops,
3307 .id = DISP_PW_ID_NONE,
3309 .hsw.regs = &hsw_power_well_regs,
3310 .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
3315 .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
3316 .ops = &hsw_power_well_ops,
3317 .id = DISP_PW_ID_NONE,
3319 .hsw.regs = &hsw_power_well_regs,
3320 .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
/*
 * Ops table for ICL AUX power wells on combo PHYs: reuses the generic HSW
 * sync_hw/is_enabled helpers, with ICL combo-PHY specific enable/disable.
 * NOTE(review): listing is sampled; the closing "};" line is not visible here.
 */
3325 static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
3326 .sync_hw = hsw_power_well_sync_hw,
3327 .enable = icl_combo_phy_aux_power_well_enable,
3328 .disable = icl_combo_phy_aux_power_well_disable,
3329 .is_enabled = hsw_power_well_enabled,
/*
 * Ops table for ICL AUX power wells on Type-C PHYs: HSW-style sync_hw and
 * is_enabled, with TC-PHY specific enable/disable hooks.
 */
3332 static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
3333 .sync_hw = hsw_power_well_sync_hw,
3334 .enable = icl_tc_phy_aux_power_well_enable,
3335 .disable = icl_tc_phy_aux_power_well_disable,
3336 .is_enabled = hsw_power_well_enabled,
/*
 * Register set (BIOS/driver/debug request registers) used by the ICL+ AUX
 * power wells. An additional KVMR register presumably sits between CTL2 and
 * CTL4 — not visible in this sampled listing; confirm against the full file.
 */
3339 static const struct i915_power_well_regs icl_aux_power_well_regs = {
3340 .bios = ICL_PWR_WELL_CTL_AUX1,
3341 .driver = ICL_PWR_WELL_CTL_AUX2,
3342 .debug = ICL_PWR_WELL_CTL_AUX4,
/*
 * Register set (BIOS/driver/debug request registers) used by the ICL+ DDI IO
 * power wells.
 */
3345 static const struct i915_power_well_regs icl_ddi_power_well_regs = {
3346 .bios = ICL_PWR_WELL_CTL_DDI1,
3347 .driver = ICL_PWR_WELL_CTL_DDI2,
3348 .debug = ICL_PWR_WELL_CTL_DDI4,
/*
 * Ice Lake power well descriptors, listed in enabling order (the disabling
 * order is the reverse — see intel_power_domains_init()).
 *
 * NOTE(review): this listing is sampled; the per-entry braces, .instanced
 * fields and some .name/.domains lines are not visible. Only comments have
 * been added here; the visible tokens are unchanged.
 */
3351 static const struct i915_power_well_desc icl_power_wells[] = {
3353 .name = "always-on",
3355 .domains = POWER_DOMAIN_MASK,
3356 .ops = &i9xx_always_on_power_well_ops,
3357 .id = DISP_PW_ID_NONE,
3360 .name = "power well 1",
3361 /* Handled by the DMC firmware */
3364 .ops = &hsw_power_well_ops,
3365 .id = SKL_DISP_PW_1,
3367 .hsw.regs = &hsw_power_well_regs,
3368 .hsw.idx = ICL_PW_CTL_IDX_PW_1,
3369 .hsw.has_fuses = true,
3374 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
3375 .ops = &gen9_dc_off_power_well_ops,
3376 .id = DISP_PW_ID_NONE,
3379 .name = "power well 2",
3380 .domains = ICL_PW_2_POWER_DOMAINS,
3381 .ops = &hsw_power_well_ops,
3382 .id = SKL_DISP_PW_2,
3384 .hsw.regs = &hsw_power_well_regs,
3385 .hsw.idx = ICL_PW_CTL_IDX_PW_2,
3386 .hsw.has_fuses = true,
3390 .name = "power well 3",
3391 .domains = ICL_PW_3_POWER_DOMAINS,
3392 .ops = &hsw_power_well_ops,
3393 .id = DISP_PW_ID_NONE,
3395 .hsw.regs = &hsw_power_well_regs,
3396 .hsw.idx = ICL_PW_CTL_IDX_PW_3,
3397 .hsw.irq_pipe_mask = BIT(PIPE_B),
3398 .hsw.has_vga = true,
3399 .hsw.has_fuses = true,
/* DDI IO wells: use the dedicated ICL DDI request registers */
3404 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
3405 .ops = &hsw_power_well_ops,
3406 .id = DISP_PW_ID_NONE,
3408 .hsw.regs = &icl_ddi_power_well_regs,
3409 .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3414 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
3415 .ops = &hsw_power_well_ops,
3416 .id = DISP_PW_ID_NONE,
3418 .hsw.regs = &icl_ddi_power_well_regs,
3419 .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3424 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
3425 .ops = &hsw_power_well_ops,
3426 .id = DISP_PW_ID_NONE,
3428 .hsw.regs = &icl_ddi_power_well_regs,
3429 .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3434 .domains = ICL_DDI_IO_D_POWER_DOMAINS,
3435 .ops = &hsw_power_well_ops,
3436 .id = DISP_PW_ID_NONE,
3438 .hsw.regs = &icl_ddi_power_well_regs,
3439 .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
3444 .domains = ICL_DDI_IO_E_POWER_DOMAINS,
3445 .ops = &hsw_power_well_ops,
3446 .id = DISP_PW_ID_NONE,
3448 .hsw.regs = &icl_ddi_power_well_regs,
3449 .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
3454 .domains = ICL_DDI_IO_F_POWER_DOMAINS,
3455 .ops = &hsw_power_well_ops,
3456 .id = DISP_PW_ID_NONE,
3458 .hsw.regs = &icl_ddi_power_well_regs,
3459 .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
/* AUX A/B sit on combo PHYs, hence the combo-PHY ops */
3464 .domains = ICL_AUX_A_IO_POWER_DOMAINS,
3465 .ops = &icl_combo_phy_aux_power_well_ops,
3466 .id = DISP_PW_ID_NONE,
3468 .hsw.regs = &icl_aux_power_well_regs,
3469 .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3474 .domains = ICL_AUX_B_IO_POWER_DOMAINS,
3475 .ops = &icl_combo_phy_aux_power_well_ops,
3476 .id = DISP_PW_ID_NONE,
3478 .hsw.regs = &icl_aux_power_well_regs,
3479 .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
/* AUX C..F are Type-C ports; is_tc_tbt selects legacy-TC vs TBT mode */
3483 .name = "AUX C TC1",
3484 .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
3485 .ops = &icl_tc_phy_aux_power_well_ops,
3486 .id = DISP_PW_ID_NONE,
3488 .hsw.regs = &icl_aux_power_well_regs,
3489 .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
3490 .hsw.is_tc_tbt = false,
3494 .name = "AUX D TC2",
3495 .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
3496 .ops = &icl_tc_phy_aux_power_well_ops,
3497 .id = DISP_PW_ID_NONE,
3499 .hsw.regs = &icl_aux_power_well_regs,
3500 .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
3501 .hsw.is_tc_tbt = false,
3505 .name = "AUX E TC3",
3506 .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
3507 .ops = &icl_tc_phy_aux_power_well_ops,
3508 .id = DISP_PW_ID_NONE,
3510 .hsw.regs = &icl_aux_power_well_regs,
3511 .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
3512 .hsw.is_tc_tbt = false,
3516 .name = "AUX F TC4",
3517 .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
3518 .ops = &icl_tc_phy_aux_power_well_ops,
3519 .id = DISP_PW_ID_NONE,
3521 .hsw.regs = &icl_aux_power_well_regs,
3522 .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
3523 .hsw.is_tc_tbt = false,
3527 .name = "AUX C TBT1",
3528 .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
3529 .ops = &icl_tc_phy_aux_power_well_ops,
3530 .id = DISP_PW_ID_NONE,
3532 .hsw.regs = &icl_aux_power_well_regs,
3533 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
3534 .hsw.is_tc_tbt = true,
3538 .name = "AUX D TBT2",
3539 .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
3540 .ops = &icl_tc_phy_aux_power_well_ops,
3541 .id = DISP_PW_ID_NONE,
3543 .hsw.regs = &icl_aux_power_well_regs,
3544 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
3545 .hsw.is_tc_tbt = true,
3549 .name = "AUX E TBT3",
3550 .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
3551 .ops = &icl_tc_phy_aux_power_well_ops,
3552 .id = DISP_PW_ID_NONE,
3554 .hsw.regs = &icl_aux_power_well_regs,
3555 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
3556 .hsw.is_tc_tbt = true,
3560 .name = "AUX F TBT4",
3561 .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
3562 .ops = &icl_tc_phy_aux_power_well_ops,
3563 .id = DISP_PW_ID_NONE,
3565 .hsw.regs = &icl_aux_power_well_regs,
3566 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
3567 .hsw.is_tc_tbt = true,
3571 .name = "power well 4",
3572 .domains = ICL_PW_4_POWER_DOMAINS,
3573 .ops = &hsw_power_well_ops,
3574 .id = DISP_PW_ID_NONE,
3576 .hsw.regs = &hsw_power_well_regs,
3577 .hsw.idx = ICL_PW_CTL_IDX_PW_4,
3578 .hsw.has_fuses = true,
3579 .hsw.irq_pipe_mask = BIT(PIPE_C),
/*
 * Tiger Lake power well descriptors, listed in enabling order (disabling
 * order is the reverse). TGL adds PW_5 / pipe D, six Type-C ports
 * (D..I == TC1..TC6) and matching TBT AUX wells.
 *
 * NOTE(review): this listing is sampled; per-entry braces and some field
 * lines are absent. Only comments have been added; visible tokens unchanged.
 *
 * NOTE(review): the TBT AUX wells below use &hsw_power_well_ops while also
 * setting .hsw.is_tc_tbt = true, whereas the ICL table uses the TC-PHY ops
 * for its TBT wells — confirm against the full file/BSpec whether this is
 * intentional or should be &icl_tc_phy_aux_power_well_ops.
 */
3584 static const struct i915_power_well_desc tgl_power_wells[] = {
3586 .name = "always-on",
3588 .domains = POWER_DOMAIN_MASK,
3589 .ops = &i9xx_always_on_power_well_ops,
3590 .id = DISP_PW_ID_NONE,
3593 .name = "power well 1",
3594 /* Handled by the DMC firmware */
3597 .ops = &hsw_power_well_ops,
3598 .id = SKL_DISP_PW_1,
3600 .hsw.regs = &hsw_power_well_regs,
3601 .hsw.idx = ICL_PW_CTL_IDX_PW_1,
3602 .hsw.has_fuses = true,
3607 .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
3608 .ops = &gen9_dc_off_power_well_ops,
3609 .id = DISP_PW_ID_NONE,
3612 .name = "power well 2",
3613 .domains = TGL_PW_2_POWER_DOMAINS,
3614 .ops = &hsw_power_well_ops,
3615 .id = SKL_DISP_PW_2,
3617 .hsw.regs = &hsw_power_well_regs,
3618 .hsw.idx = ICL_PW_CTL_IDX_PW_2,
3619 .hsw.has_fuses = true,
3623 .name = "power well 3",
3624 .domains = TGL_PW_3_POWER_DOMAINS,
3625 .ops = &hsw_power_well_ops,
3626 .id = DISP_PW_ID_NONE,
3628 .hsw.regs = &hsw_power_well_regs,
3629 .hsw.idx = ICL_PW_CTL_IDX_PW_3,
3630 .hsw.irq_pipe_mask = BIT(PIPE_B),
3631 .hsw.has_vga = true,
3632 .hsw.has_fuses = true,
/* DDI IO wells: combo ports A-C, then Type-C ports D-I (TC1-TC6) */
3637 .domains = ICL_DDI_IO_A_POWER_DOMAINS,
3638 .ops = &hsw_power_well_ops,
3639 .id = DISP_PW_ID_NONE,
3641 .hsw.regs = &icl_ddi_power_well_regs,
3642 .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
3647 .domains = ICL_DDI_IO_B_POWER_DOMAINS,
3648 .ops = &hsw_power_well_ops,
3649 .id = DISP_PW_ID_NONE,
3651 .hsw.regs = &icl_ddi_power_well_regs,
3652 .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
3657 .domains = ICL_DDI_IO_C_POWER_DOMAINS,
3658 .ops = &hsw_power_well_ops,
3659 .id = DISP_PW_ID_NONE,
3661 .hsw.regs = &icl_ddi_power_well_regs,
3662 .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
3666 .name = "DDI D TC1 IO",
3667 .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
3668 .ops = &hsw_power_well_ops,
3669 .id = DISP_PW_ID_NONE,
3671 .hsw.regs = &icl_ddi_power_well_regs,
3672 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
3676 .name = "DDI E TC2 IO",
3677 .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
3678 .ops = &hsw_power_well_ops,
3679 .id = DISP_PW_ID_NONE,
3681 .hsw.regs = &icl_ddi_power_well_regs,
3682 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
3686 .name = "DDI F TC3 IO",
3687 .domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS,
3688 .ops = &hsw_power_well_ops,
3689 .id = DISP_PW_ID_NONE,
3691 .hsw.regs = &icl_ddi_power_well_regs,
3692 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
3696 .name = "DDI G TC4 IO",
3697 .domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS,
3698 .ops = &hsw_power_well_ops,
3699 .id = DISP_PW_ID_NONE,
3701 .hsw.regs = &icl_ddi_power_well_regs,
3702 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
3706 .name = "DDI H TC5 IO",
3707 .domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS,
3708 .ops = &hsw_power_well_ops,
3709 .id = DISP_PW_ID_NONE,
3711 .hsw.regs = &icl_ddi_power_well_regs,
3712 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
3716 .name = "DDI I TC6 IO",
3717 .domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS,
3718 .ops = &hsw_power_well_ops,
3719 .id = DISP_PW_ID_NONE,
3721 .hsw.regs = &icl_ddi_power_well_regs,
3722 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
/* AUX wells for the combo ports A-C */
3727 .domains = TGL_AUX_A_IO_POWER_DOMAINS,
3728 .ops = &icl_combo_phy_aux_power_well_ops,
3729 .id = DISP_PW_ID_NONE,
3731 .hsw.regs = &icl_aux_power_well_regs,
3732 .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
3737 .domains = TGL_AUX_B_IO_POWER_DOMAINS,
3738 .ops = &icl_combo_phy_aux_power_well_ops,
3739 .id = DISP_PW_ID_NONE,
3741 .hsw.regs = &icl_aux_power_well_regs,
3742 .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
3747 .domains = TGL_AUX_C_IO_POWER_DOMAINS,
3748 .ops = &icl_combo_phy_aux_power_well_ops,
3749 .id = DISP_PW_ID_NONE,
3751 .hsw.regs = &icl_aux_power_well_regs,
3752 .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
/* AUX wells for the Type-C ports in legacy/TC mode */
3756 .name = "AUX D TC1",
3757 .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
3758 .ops = &icl_tc_phy_aux_power_well_ops,
3759 .id = DISP_PW_ID_NONE,
3761 .hsw.regs = &icl_aux_power_well_regs,
3762 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
3763 .hsw.is_tc_tbt = false,
3767 .name = "AUX E TC2",
3768 .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
3769 .ops = &icl_tc_phy_aux_power_well_ops,
3770 .id = DISP_PW_ID_NONE,
3772 .hsw.regs = &icl_aux_power_well_regs,
3773 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
3774 .hsw.is_tc_tbt = false,
3778 .name = "AUX F TC3",
3779 .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
3780 .ops = &icl_tc_phy_aux_power_well_ops,
3781 .id = DISP_PW_ID_NONE,
3783 .hsw.regs = &icl_aux_power_well_regs,
3784 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
3785 .hsw.is_tc_tbt = false,
3789 .name = "AUX G TC4",
3790 .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
3791 .ops = &icl_tc_phy_aux_power_well_ops,
3792 .id = DISP_PW_ID_NONE,
3794 .hsw.regs = &icl_aux_power_well_regs,
3795 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
3796 .hsw.is_tc_tbt = false,
3800 .name = "AUX H TC5",
3801 .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
3802 .ops = &icl_tc_phy_aux_power_well_ops,
3803 .id = DISP_PW_ID_NONE,
3805 .hsw.regs = &icl_aux_power_well_regs,
3806 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
3807 .hsw.is_tc_tbt = false,
3811 .name = "AUX I TC6",
3812 .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
3813 .ops = &icl_tc_phy_aux_power_well_ops,
3814 .id = DISP_PW_ID_NONE,
3816 .hsw.regs = &icl_aux_power_well_regs,
3817 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
3818 .hsw.is_tc_tbt = false,
/* AUX wells for the Type-C ports in Thunderbolt mode (see NOTE above) */
3822 .name = "AUX D TBT1",
3823 .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
3824 .ops = &hsw_power_well_ops,
3825 .id = DISP_PW_ID_NONE,
3827 .hsw.regs = &icl_aux_power_well_regs,
3828 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
3829 .hsw.is_tc_tbt = true,
3833 .name = "AUX E TBT2",
3834 .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
3835 .ops = &hsw_power_well_ops,
3836 .id = DISP_PW_ID_NONE,
3838 .hsw.regs = &icl_aux_power_well_regs,
3839 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
3840 .hsw.is_tc_tbt = true,
3844 .name = "AUX F TBT3",
3845 .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
3846 .ops = &hsw_power_well_ops,
3847 .id = DISP_PW_ID_NONE,
3849 .hsw.regs = &icl_aux_power_well_regs,
3850 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
3851 .hsw.is_tc_tbt = true,
3855 .name = "AUX G TBT4",
3856 .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
3857 .ops = &hsw_power_well_ops,
3858 .id = DISP_PW_ID_NONE,
3860 .hsw.regs = &icl_aux_power_well_regs,
3861 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
3862 .hsw.is_tc_tbt = true,
3866 .name = "AUX H TBT5",
3867 .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
3868 .ops = &hsw_power_well_ops,
3869 .id = DISP_PW_ID_NONE,
3871 .hsw.regs = &icl_aux_power_well_regs,
3872 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
3873 .hsw.is_tc_tbt = true,
3877 .name = "AUX I TBT6",
3878 .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
3879 .ops = &hsw_power_well_ops,
3880 .id = DISP_PW_ID_NONE,
3882 .hsw.regs = &icl_aux_power_well_regs,
3883 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
3884 .hsw.is_tc_tbt = true,
3888 .name = "power well 4",
3889 .domains = TGL_PW_4_POWER_DOMAINS,
3890 .ops = &hsw_power_well_ops,
3891 .id = DISP_PW_ID_NONE,
3893 .hsw.regs = &hsw_power_well_regs,
3894 .hsw.idx = ICL_PW_CTL_IDX_PW_4,
3895 .hsw.has_fuses = true,
3896 .hsw.irq_pipe_mask = BIT(PIPE_C),
3900 .name = "power well 5",
3901 .domains = TGL_PW_5_POWER_DOMAINS,
3902 .ops = &hsw_power_well_ops,
3903 .id = DISP_PW_ID_NONE,
3905 .hsw.regs = &hsw_power_well_regs,
3906 .hsw.idx = TGL_PW_CTL_IDX_PW_5,
3907 .hsw.has_fuses = true,
3908 .hsw.irq_pipe_mask = BIT(PIPE_D),
/*
 * Normalize the i915.disable_power_well module parameter to a strict boolean:
 * any non-negative value is clamped to 0/1.
 * NOTE(review): the fallback for negative values (auto-detect, original lines
 * ~3919+) is not visible in this sampled listing.
 */
3914 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
3915 int disable_power_well)
3917 if (disable_power_well >= 0)
3918 return !!disable_power_well;
/*
 * Compute the mask of DC (display C-state) power states the driver may use,
 * from the platform's maximum supported state and the i915.enable_dc module
 * parameter.
 *
 * Behavior visible in this (sampled) listing:
 *  - Gen11+: DC9 is always allowed — it has its own HW flow independent of
 *    the DMC firmware and is needed for system suspend/resume.
 *  - Gen10 / Gen9 big-core and Gen9 LP each pick their own max_dc/DC9 policy
 *    (some assignments are on lines not visible here).
 *  - If power wells are never disabled (disable_power_well == 0), DC states
 *    are pointless; the branch at line 3949 presumably forces max_dc to 0 —
 *    confirm against the full file.
 *  - enable_dc in [0, max_dc] is honored; -1 means "use platform max";
 *    a too-large request is clamped with a debug message; anything else is
 *    an error and falls back to max_dc.
 *  - requested_dc > 1 additionally allows DC6, > 0 allows DC5.
 */
3923 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
3930 if (INTEL_GEN(dev_priv) >= 11) {
3933 * DC9 has a separate HW flow from the rest of the DC states,
3934 * not depending on the DMC firmware. It's needed by system
3935 * suspend/resume, so allow it unconditionally.
3937 mask = DC_STATE_EN_DC9;
3938 } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
3941 } else if (IS_GEN9_LP(dev_priv)) {
3943 mask = DC_STATE_EN_DC9;
3949 if (!i915_modparams.disable_power_well)
3952 if (enable_dc >= 0 && enable_dc <= max_dc) {
3953 requested_dc = enable_dc;
3954 } else if (enable_dc == -1) {
3955 requested_dc = max_dc;
3956 } else if (enable_dc > max_dc && enable_dc <= 2) {
3957 DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
3959 requested_dc = max_dc;
3961 DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
3962 requested_dc = max_dc;
3965 if (requested_dc > 1)
3966 mask |= DC_STATE_EN_UPTO_DC6;
3967 if (requested_dc > 0)
3968 mask |= DC_STATE_EN_UPTO_DC5;
3970 DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
/*
 * Allocate power_domains->power_wells and point each entry at the matching
 * descriptor in @power_well_descs. Also sanity-checks that every non-NONE
 * power well id fits in a u64 bitmap and is unique within the table.
 * Returns -ENOMEM on allocation failure (return statements are on lines not
 * visible in this sampled listing).
 */
3976 __set_power_wells(struct i915_power_domains *power_domains,
3977 const struct i915_power_well_desc *power_well_descs,
3978 int power_well_count)
3980 u64 power_well_ids = 0;
3983 power_domains->power_well_count = power_well_count;
3984 power_domains->power_wells =
3985 kcalloc(power_well_count,
3986 sizeof(*power_domains->power_wells),
3988 if (!power_domains->power_wells)
3991 for (i = 0; i < power_well_count; i++) {
3992 enum i915_power_well_id id = power_well_descs[i].id;
3994 power_domains->power_wells[i].desc = &power_well_descs[i];
3996 if (id == DISP_PW_ID_NONE)
/* ids act as singleton lookup keys; duplicates would break lookup_power_well() */
3999 WARN_ON(id >= sizeof(power_well_ids) * 8);
4000 WARN_ON(power_well_ids & BIT_ULL(id));
4001 power_well_ids |= BIT_ULL(id);
/* Convenience wrapper: derives the element count from the array argument. */
4007 #define set_power_wells(power_domains, __power_well_descs) \
4008 __set_power_wells(power_domains, __power_well_descs, \
4009 ARRAY_SIZE(__power_well_descs))
4012 * intel_power_domains_init - initializes the power domain structures
4013 * @dev_priv: i915 device instance
4015 * Initializes the power domain structures for @dev_priv depending upon the
4016 * supported platform.
/*
 * Sanitizes the disable_power_well/enable_dc module parameters, then selects
 * the platform's power-well descriptor table. Tables are ordered so wells are
 * enabled low-to-high index and disabled in reverse. Returns the result of
 * set_power_wells() (0 or -ENOMEM).
 */
4018 int intel_power_domains_init(struct drm_i915_private *dev_priv)
4020 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4023 i915_modparams.disable_power_well =
4024 sanitize_disable_power_well_option(dev_priv,
4025 i915_modparams.disable_power_well);
4026 dev_priv->csr.allowed_dc_mask =
4027 get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
/* domains are tracked in a u64 bitmask; guard against enum growth */
4029 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
4031 mutex_init(&power_domains->lock);
4033 INIT_DELAYED_WORK(&power_domains->async_put_work,
4034 intel_display_power_put_async_work);
4037 * The enabling order will be from lower to higher indexed wells,
4038 * the disabling order is reversed.
4040 if (IS_GEN(dev_priv, 12)) {
4041 err = set_power_wells(power_domains, tgl_power_wells);
4042 } else if (IS_GEN(dev_priv, 11)) {
4043 err = set_power_wells(power_domains, icl_power_wells);
4044 } else if (IS_CANNONLAKE(dev_priv)) {
4045 err = set_power_wells(power_domains, cnl_power_wells);
4048 * DDI and Aux IO are getting enabled for all ports
4049 * regardless the presence or use. So, in order to avoid
4050 * timeouts, lets remove them from the list
4051 * for the SKUs without port F.
4053 if (!IS_CNL_WITH_PORT_F(dev_priv))
/* relies on "DDI F IO" and "AUX F" being the last two cnl table entries */
4054 power_domains->power_well_count -= 2;
4055 } else if (IS_GEMINILAKE(dev_priv)) {
4056 err = set_power_wells(power_domains, glk_power_wells);
4057 } else if (IS_BROXTON(dev_priv)) {
4058 err = set_power_wells(power_domains, bxt_power_wells);
4059 } else if (IS_GEN9_BC(dev_priv)) {
4060 err = set_power_wells(power_domains, skl_power_wells);
4061 } else if (IS_CHERRYVIEW(dev_priv)) {
4062 err = set_power_wells(power_domains, chv_power_wells);
4063 } else if (IS_BROADWELL(dev_priv)) {
4064 err = set_power_wells(power_domains, bdw_power_wells);
4065 } else if (IS_HASWELL(dev_priv)) {
4066 err = set_power_wells(power_domains, hsw_power_wells);
4067 } else if (IS_VALLEYVIEW(dev_priv)) {
4068 err = set_power_wells(power_domains, vlv_power_wells);
4069 } else if (IS_I830(dev_priv)) {
4070 err = set_power_wells(power_domains, i830_power_wells);
4072 err = set_power_wells(power_domains, i9xx_always_on_power_well);
4079 * intel_power_domains_cleanup - clean up power domains resources
4080 * @dev_priv: i915 device instance
4082 * Release any resources acquired by intel_power_domains_init()
/* Frees the power_wells array allocated by __set_power_wells(). */
4084 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
4086 kfree(dev_priv->power_domains.power_wells);
/*
 * Synchronize driver state with the current hardware state of every power
 * well: invoke each well's sync_hw hook and cache its is_enabled() result in
 * power_well->hw_enabled. Runs under the power_domains lock.
 */
4089 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
4091 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4092 struct i915_power_well *power_well;
4094 mutex_lock(&power_domains->lock);
4095 for_each_power_well(dev_priv, power_well) {
4096 power_well->desc->ops->sync_hw(dev_priv, power_well);
4097 power_well->hw_enabled =
4098 power_well->desc->ops->is_enabled(dev_priv, power_well);
4100 mutex_unlock(&power_domains->lock);
/*
 * Request a DBuf slice power state change via @reg and verify the STATE bit
 * follows the REQUEST bit. Logs an error on mismatch ("timeout"); any delay/
 * wait between write and readback is on lines not visible in this sampled
 * listing. Return statement is likewise not visible — presumably returns
 * whether the final state matches the request; confirm against the full file.
 */
4104 bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
4105 i915_reg_t reg, bool enable)
4109 val = I915_READ(reg);
4110 val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
4111 I915_WRITE(reg, val);
4115 status = I915_READ(reg) & DBUF_POWER_STATE;
4116 if ((enable && !status) || (!enable && status)) {
4117 DRM_ERROR("DBus power %s timeout!\n",
4118 enable ? "enable" : "disable");
/* Power up the single gen9 DBuf slice. */
4124 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
4126 intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
/* Power down the single gen9 DBuf slice. */
4129 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
4131 intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
/*
 * Number of DBuf slices on this platform: gen11+ has more than pre-gen11.
 * The actual return values (presumably 1 and 2) are on lines not visible in
 * this sampled listing.
 */
4134 static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
4136 if (INTEL_GEN(dev_priv) < 11)
/*
 * Adjust the number of enabled DBuf slices on ICL+ to @req_slices.
 * Validates the request against the platform maximum, no-ops when the count
 * is already correct (or zero is requested), and toggles the second slice
 * (S2) up or down as needed. The cached count is updated only on success
 * (the check of @ret is presumably on a line not visible in this sampled
 * listing — confirm against the full file).
 */
4141 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
4144 const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
4147 if (req_slices > intel_dbuf_max_slices(dev_priv)) {
4148 DRM_ERROR("Invalid number of dbuf slices requested\n");
4152 if (req_slices == hw_enabled_slices || req_slices == 0)
4155 if (req_slices > hw_enabled_slices)
4156 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
4158 ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
4161 dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
/*
 * Power up both ICL DBuf slices directly (without intel_dbuf_slice_set) and
 * verify both report powered; logs an error on timeout. Intentionally caches
 * only 1 enabled slice — see the FIXME below.
 */
4164 static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
4166 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
4167 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
4168 POSTING_READ(DBUF_CTL_S2);
4172 if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
4173 !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
4174 DRM_ERROR("DBuf power enable timeout\n");
4177 * FIXME: for now pretend that we only have 1 slice, see
4178 * intel_enabled_dbuf_slices_num().
4180 dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
/*
 * Power down both ICL DBuf slices and verify both report off; logs an error
 * on timeout. The cached slice count intentionally stays at 1 — see FIXME.
 */
4183 static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
4185 I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
4186 I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
4187 POSTING_READ(DBUF_CTL_S2);
4191 if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
4192 (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
4193 DRM_ERROR("DBuf power disable timeout!\n");
4196 * FIXME: for now pretend that the first slice is always
4197 * enabled, see intel_enabled_dbuf_slices_num().
4199 dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
/*
 * Program the MBus A-box credit configuration for ICL+: 16 BT credits per
 * pool, 1 B-credit, 1 BW-credit.
 */
4202 static void icl_mbus_init(struct drm_i915_private *dev_priv)
4206 val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
4207 MBUS_ABOX_BT_CREDIT_POOL2(16) |
4208 MBUS_ABOX_B_CREDIT(1) |
4209 MBUS_ABOX_BW_CREDIT(1);
4211 I915_WRITE(MBUS_ABOX_CTL, val);
/*
 * Sanity-check the LCPLL state that BIOS should have established on HSW/BDW:
 * CDCLK sourced from LCPLL (not FCLK), LCPLL enabled, and using the non-SSC
 * reference. Only reports errors; never reprograms the PLL.
 */
4214 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
4216 u32 val = I915_READ(LCPLL_CTL);
4219 * The LCPLL register should be turned on by the BIOS. For now
4220 * let's just check its state and print errors in case
4221 * something is wrong. Don't even try to turn it on.
4224 if (val & LCPLL_CD_SOURCE_FCLK)
4225 DRM_ERROR("CDCLK source is not LCPLL\n");
4227 if (val & LCPLL_PLL_DISABLE)
4228 DRM_ERROR("LCPLL is disabled\n");
4230 if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
4231 DRM_ERROR("LCPLL not using non-SSC reference\n");
/*
 * Debug-build assertions that nothing still depends on LCPLL before it is
 * disabled: no active CRTCs, display power well off, SPLL/WRPLL1/WRPLL2 off,
 * panel power off, CPU and PCH backlight PWMs off, utility pin and PCH GTC
 * off, and IRQs disabled. Each violation is reported via I915_STATE_WARN.
 */
4234 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
4236 struct drm_device *dev = &dev_priv->drm;
4237 struct intel_crtc *crtc;
4239 for_each_intel_crtc(dev, crtc)
4240 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
4241 pipe_name(crtc->pipe));
4243 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
4244 "Display power well on\n")
4245 I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE,
4247 I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
4248 "WRPLL1 enabled\n");
4249 I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
4250 "WRPLL2 enabled\n");
4251 I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON,
4252 "Panel power on\n");
4253 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
4254 "CPU PWM1 enabled\n");
/* PWM2 exists on HSW only */
4255 if (IS_HASWELL(dev_priv))
4256 I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
4257 "CPU PWM2 enabled\n");
4258 I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
4259 "PCH PWM1 enabled\n");
4260 I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
4261 "Utility pin enabled\n");
4262 I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE,
4263 "PCH GTC enabled\n");
4266 * In theory we can still leave IRQs enabled, as long as only the HPD
4267 * interrupts remain enabled. We used to check for that, but since it's
4268 * gen-specific and since we only disable LCPLL after we fully disable
4269 * the interrupts, the check below should be enough.
4271 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
/* Read D_COMP from its per-platform location (HSW vs BDW register). */
4274 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
4276 if (IS_HASWELL(dev_priv))
4277 return I915_READ(D_COMP_HSW);
4279 return I915_READ(D_COMP_BDW);
/*
 * Write D_COMP: on HSW it must go through the pcode mailbox
 * (GEN6_PCODE_WRITE_D_COMP); on BDW it is a plain MMIO write with a posting
 * read. A failed pcode write is only logged.
 */
4282 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
4284 if (IS_HASWELL(dev_priv)) {
4285 if (sandybridge_pcode_write(dev_priv,
4286 GEN6_PCODE_WRITE_D_COMP, val))
4287 DRM_DEBUG_KMS("Failed to write to D_COMP\n");
4289 I915_WRITE(D_COMP_BDW, val);
4290 POSTING_READ(D_COMP_BDW);
4295 * This function implements pieces of two sequences from BSpec:
4296 * - Sequence for display software to disable LCPLL
4297 * - Sequence for display software to allow package C8+
4298 * The steps implemented here are just the steps that actually touch the LCPLL
4299 * register. Callers should take care of disabling all the display engine
4300 * functions, doing the mode unset, fixing interrupts, etc.
/*
 * @switch_to_fclk: first switch CDCLK to the FCLK backup source.
 * @allow_power_down: finally set LCPLL_POWER_DOWN_ALLOW so the PLL can be
 * powered off. Asserts the "can disable" preconditions, disables the PLL,
 * waits for unlock, then disables D_COMP and waits for RCOMP to finish.
 */
4302 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
4303 bool switch_to_fclk, bool allow_power_down)
4307 assert_can_disable_lcpll(dev_priv);
4309 val = I915_READ(LCPLL_CTL);
4311 if (switch_to_fclk) {
4312 val |= LCPLL_CD_SOURCE_FCLK;
4313 I915_WRITE(LCPLL_CTL, val);
/* FCLK switch-over is specified to complete within 1 us */
4315 if (wait_for_us(I915_READ(LCPLL_CTL) &
4316 LCPLL_CD_SOURCE_FCLK_DONE, 1))
4317 DRM_ERROR("Switching to FCLK failed\n");
4319 val = I915_READ(LCPLL_CTL);
4322 val |= LCPLL_PLL_DISABLE;
4323 I915_WRITE(LCPLL_CTL, val);
4324 POSTING_READ(LCPLL_CTL);
4326 if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
4327 DRM_ERROR("LCPLL still locked\n");
4329 val = hsw_read_dcomp(dev_priv);
4330 val |= D_COMP_COMP_DISABLE;
4331 hsw_write_dcomp(dev_priv, val);
4334 if (wait_for((hsw_read_dcomp(dev_priv) &
4335 D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
4336 DRM_ERROR("D_COMP RCOMP still in progress\n");
4338 if (allow_power_down) {
4339 val = I915_READ(LCPLL_CTL);
4340 val |= LCPLL_POWER_DOWN_ALLOW;
4341 I915_WRITE(LCPLL_CTL, val);
4342 POSTING_READ(LCPLL_CTL);
4347 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
/*
 * Inverse of hsw_disable_lcpll(): early-out if the PLL is already locked and
 * fully configured; otherwise clear POWER_DOWN_ALLOW, force D_COMP back on,
 * re-enable the PLL and wait for lock, and switch CDCLK back from FCLK.
 * Holds forcewake across the sequence to keep the machine out of PC8.
 * Finishes by refreshing the cached CDCLK state.
 */
4350 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
4354 val = I915_READ(LCPLL_CTL);
4356 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
4357 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
4361 * Make sure we're not on PC8 state before disabling PC8, otherwise
4362 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
4364 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
4366 if (val & LCPLL_POWER_DOWN_ALLOW) {
4367 val &= ~LCPLL_POWER_DOWN_ALLOW;
4368 I915_WRITE(LCPLL_CTL, val);
4369 POSTING_READ(LCPLL_CTL);
4372 val = hsw_read_dcomp(dev_priv);
4373 val |= D_COMP_COMP_FORCE;
4374 val &= ~D_COMP_COMP_DISABLE;
4375 hsw_write_dcomp(dev_priv, val);
4377 val = I915_READ(LCPLL_CTL);
4378 val &= ~LCPLL_PLL_DISABLE;
4379 I915_WRITE(LCPLL_CTL, val);
4381 if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
4382 DRM_ERROR("LCPLL not locked yet\n");
4384 if (val & LCPLL_CD_SOURCE_FCLK) {
4385 val = I915_READ(LCPLL_CTL);
4386 val &= ~LCPLL_CD_SOURCE_FCLK;
4387 I915_WRITE(LCPLL_CTL, val);
4389 if (wait_for_us((I915_READ(LCPLL_CTL) &
4390 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
4391 DRM_ERROR("Switching back to LCPLL failed\n");
4394 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
4396 intel_update_cdclk(dev_priv);
4397 intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
4401 * Package states C8 and deeper are really deep PC states that can only be
4402 * reached when all the devices on the system allow it, so even if the graphics
4403 * device allows PC8+, it doesn't mean the system will actually get to these
4404 * states. Our driver only allows PC8+ when going into runtime PM.
4406 * The requirements for PC8+ are that all the outputs are disabled, the power
4407 * well is disabled and most interrupts are disabled, and these are also
4408 * requirements for runtime PM. When these conditions are met, we manually do
4409 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
4410 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
4413 * When we really reach PC8 or deeper states (not just when we allow it) we lose
4414 * the state of some registers, so when we come back from PC8+ we need to
4415 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
4416 * need to take care of the registers kept by RC6. Notice that this happens even
4417 * if we don't put the device in PCI D3 state (which is what currently happens
4418 * because of the runtime PM support).
4420 * For more, read "Display Sequences for Package C8" on the hardware
/*
 * Allow package C8+: on LPT-LP PCHs clear the partition-level disable bit,
 * stop the DP clkout, then disable LCPLL (switching CDCLK to FCLK and
 * allowing PLL power-down).
 */
4423 static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
4427 DRM_DEBUG_KMS("Enabling package C8+\n");
4429 if (HAS_PCH_LPT_LP(dev_priv)) {
4430 val = I915_READ(SOUTH_DSPCLK_GATE_D);
4431 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4432 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
4435 lpt_disable_clkout_dp(dev_priv);
4436 hsw_disable_lcpll(dev_priv, true, true);
/*
 * Inverse of hsw_enable_pc8(): restore LCPLL, re-init the PCH reference
 * clock, and on LPT-LP re-set the partition-level disable bit to block deep
 * PCH power states.
 */
4439 static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
4443 DRM_DEBUG_KMS("Disabling package C8+\n");
4445 hsw_restore_lcpll(dev_priv);
4446 intel_init_pch_refclk(dev_priv);
4448 if (HAS_PCH_LPT_LP(dev_priv)) {
4449 val = I915_READ(SOUTH_DSPCLK_GATE_D);
4450 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
4451 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
/*
 * Enable or disable the north/south (PCH) reset handshake. IVB uses
 * GEN7_MSG_CTL with FLR/RESET ACK bits; later platforms use
 * HSW_NDE_RSTWRN_OPT. The read-modify-write applying @reset_bits (set or
 * clear depending on the "enable" argument) happens on lines 4470-4474,
 * which are not visible in this sampled listing.
 */
4455 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
4459 u32 reset_bits, val;
4461 if (IS_IVYBRIDGE(dev_priv)) {
4463 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
4465 reg = HSW_NDE_RSTWRN_OPT;
4466 reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
4469 val = I915_READ(reg);
4476 I915_WRITE(reg, val);
/*
 * SKL display core init sequence: exit any DC state, enable the PCH reset
 * handshake (unless the PCH is a NOP), enable PG1 and the Misc I/O well
 * under the power_domains lock, bring up CDCLK and DBuf, and on resume
 * reload the DMC firmware if a payload is present.
 */
4479 static void skl_display_core_init(struct drm_i915_private *dev_priv,
4482 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4483 struct i915_power_well *well;
4485 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4487 /* enable PCH reset handshake */
4488 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
4490 /* enable PG1 and Misc I/O */
4491 mutex_lock(&power_domains->lock);
4493 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4494 intel_power_well_enable(dev_priv, well);
4496 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
4497 intel_power_well_enable(dev_priv, well);
4499 mutex_unlock(&power_domains->lock);
4501 intel_cdclk_init(dev_priv);
4503 gen9_dbuf_enable(dev_priv);
4505 if (resume && dev_priv->csr.dmc_payload)
4506 intel_csr_load_program(dev_priv);
/*
 * SKL display core teardown: disable DC states, DBuf and CDCLK, then drop
 * the driver's request on PG1 only — per BSpec, Misc I/O stays enabled and
 * PW1 may remain up due to DMC's own request. Ends with the 10 us delay the
 * spec requires.
 */
4509 static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
4511 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4512 struct i915_power_well *well;
4514 gen9_disable_dc_states(dev_priv);
4516 gen9_dbuf_disable(dev_priv);
4518 intel_cdclk_uninit(dev_priv);
4520 /* The spec doesn't call for removing the reset handshake flag */
4521 /* disable PG1 and Misc I/O */
4523 mutex_lock(&power_domains->lock);
4526 * BSpec says to keep the MISC IO power well enabled here, only
4527 * remove our request for power well 1.
4528 * Note that even though the driver's request is removed power well 1
4529 * may stay enabled after this due to DMC's own request on it.
4531 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4532 intel_power_well_disable(dev_priv, well);
4534 mutex_unlock(&power_domains->lock);
4536 usleep_range(10, 30); /* 10 us delay per Bspec */
/*
 * BXT display core init: exit DC states, explicitly DISABLE the PCH reset
 * handshake (BXT has no PCH to respond — a set bit would hang resets), then
 * enable PG1, CDCLK and DBuf, and reload DMC firmware on resume.
 */
4539 static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
4541 struct i915_power_domains *power_domains = &dev_priv->power_domains;
4542 struct i915_power_well *well;
4544 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
4547 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
4548 * or else the reset will hang because there is no PCH to respond.
4549 * Move the handshake programming to initialization sequence.
4550 * Previously was left up to BIOS.
4552 intel_pch_reset_handshake(dev_priv, false);
4555 mutex_lock(&power_domains->lock);
4557 well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
4558 intel_power_well_enable(dev_priv, well);
4560 mutex_unlock(&power_domains->lock);
4562 intel_cdclk_init(dev_priv);
4564 gen9_dbuf_enable(dev_priv);
4566 if (resume && dev_priv->csr.dmc_payload)
4567 intel_csr_load_program(dev_priv);
/* Undo bxt_display_core_init(): DC off, DBuf off, CDCLK off, then PW1 off. */
static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_disable_dc_states(dev_priv);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */
}
/* Power up the CNL display core following the BSpec display init sequence. */
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH Reset Handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* 2-3. Initialize all combo phys */
	intel_combo_phy_init(dev_priv);

	/*
	 * 4. Enable Power Well 1 (PG1).
	 * The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Enable CD clock */
	intel_cdclk_init(dev_priv);

	/* 6. Enable DBUF */
	gen9_dbuf_enable(dev_priv);

	/* Re-program the DMC firmware on resume if a payload was loaded. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
/* Undo cnl_display_core_init(): reverse the init sequence step by step. */
static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_disable_dc_states(dev_priv);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 * The AUX IO power wells are toggled on demand, so they are already
	 * disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */

	/* 5. */
	intel_combo_phy_uninit(dev_priv);
}
/* Power up the ICL+ display core following the BSpec display init sequence. */
static void icl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* 2. Initialize all combo phys */
	intel_combo_phy_init(dev_priv);

	/*
	 * 3. Enable Power Well 1 (PG1).
	 * The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 4. Enable CDCLK. */
	intel_cdclk_init(dev_priv);

	/* 5. Enable DBUF. */
	icl_dbuf_enable(dev_priv);

	/* 6. Setup MBUS. */
	icl_mbus_init(dev_priv);

	/* Re-program the DMC firmware on resume if a payload was loaded. */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}
/* Undo icl_display_core_init(): reverse the init sequence step by step. */
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_disable_dc_states(dev_priv);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	icl_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 * The AUX IO power wells are toggled on demand, so they are already
	 * disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. */
	intel_combo_phy_uninit(dev_priv);
}
/*
 * Reconstruct the initial value of the DISPLAY_PHY_CONTROL shadow from the
 * current power well and lane-ready state (the register itself must never
 * be read, see below), then program it.
 */
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		u32 status = I915_READ(DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		u32 status = I915_READ(DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);

	DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
		      dev_priv->chv_phy_control);
}
/*
 * VLV common-lane workaround: toggle the display PHY side reset by power
 * cycling the common lane well, unless the display looks already active.
 */
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);

	/* If the display might be already active skip this */
	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
	    I915_READ(DPIO_CTL) & DPIO_CMNRST)
		return;

	DRM_DEBUG_KMS("toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->desc->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->desc->ops->disable(dev_priv, cmn);
}
4841 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
4845 vlv_punit_get(dev_priv);
4846 ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
4847 vlv_punit_put(dev_priv);
/* Warn if the VED power island is not power gated at driver init. */
static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
{
	WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
	     "VED not power gated\n");
}
/*
 * Warn if the ISP power island is not power gated at driver init.
 * The warning is skipped when one of the listed ISP PCI devices is present —
 * presumably the ISP is then owned by another driver; verify against callers.
 */
static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
{
	static const struct pci_device_id isp_ids[] = {
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
		{}
	};

	WARN(!pci_dev_present(isp_ids) &&
	     !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
	     "ISP not power gated\n");
}
4871 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @i915: i915 device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells in other
 * domains (and not in the INIT domain) are referenced or disabled by
 * intel_modeset_readout_hw_state(). After that the reference count of each
 * power well must match its HW enabled state, see
 * intel_power_domains_verify_state().
 *
 * It will return with power domains disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_driver_remove().
 */
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	power_domains->initializing = true;

	/* Platform-specific display core bring-up / workarounds. */
	if (INTEL_GEN(i915) >= 11) {
		icl_display_core_init(i915, resume);
	} else if (IS_CANNONLAKE(i915)) {
		cnl_display_core_init(i915, resume);
	} else if (IS_GEN9_BC(i915)) {
		skl_display_core_init(i915, resume);
	} else if (IS_GEN9_LP(i915)) {
		bxt_display_core_init(i915, resume);
	} else if (IS_CHERRYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(i915);
		mutex_unlock(&power_domains->lock);
		assert_isp_power_gated(i915);
	} else if (IS_VALLEYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(i915);
		mutex_unlock(&power_domains->lock);
		assert_ved_power_gated(i915);
		assert_isp_power_gated(i915);
	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
		hsw_assert_cdclk(i915);
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	} else if (IS_IVYBRIDGE(i915)) {
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	}

	/*
	 * Keep all power wells enabled for any dependent HW access during
	 * initialization and to make sure we keep BIOS enabled display HW
	 * resources powered until display HW readout is complete. We drop
	 * this reference in intel_power_domains_enable().
	 */
	power_domains->wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	/* Disable power support if the user asked so. */
	if (!i915_modparams.disable_power_well)
		intel_display_power_get(i915, POWER_DOMAIN_INIT);
	intel_power_domains_sync_hw(i915);

	power_domains->initializing = false;
}
/**
 * intel_power_domains_driver_remove - deinitialize hw power domain state
 * @i915: i915 device instance
 *
 * De-initializes the display power domain HW state. It also ensures that the
 * device stays powered up so that the driver can be reloaded.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and must be paired with
 * intel_power_domains_init_hw().
 */
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.wakeref);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915_modparams.disable_power_well)
		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);

	intel_display_power_flush_work_sync(i915);

	intel_power_domains_verify_state(i915);

	/* Keep the power well enabled, but cancel its rpm wakeref. */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
/**
 * intel_power_domains_enable - enable toggling of display power wells
 * @i915: i915 device instance
 *
 * Enable the ondemand enabling/disabling of the display power wells. Note that
 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
 * only at specific points of the display modeset sequence, thus they are not
 * affected by the intel_power_domains_enable()/disable() calls. The purpose
 * of these function is to keep the rest of power wells enabled until the end
 * of display HW readout (which will acquire the power references reflecting
 * the current HW state).
 */
void intel_power_domains_enable(struct drm_i915_private *i915)
{
	/* Drop the INIT reference taken in intel_power_domains_init_hw(). */
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
	intel_power_domains_verify_state(i915);
}
/**
 * intel_power_domains_disable - disable toggling of display power wells
 * @i915: i915 device instance
 *
 * Disable the ondemand enabling/disabling of the display power wells. See
 * intel_power_domains_enable() for which power wells this call controls.
 */
void intel_power_domains_disable(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	/* Re-take the INIT reference dropped in intel_power_domains_enable(). */
	WARN_ON(power_domains->wakeref);
	power_domains->wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	intel_power_domains_verify_state(i915);
}
/**
 * intel_power_domains_suspend - suspend power domain state
 * @i915: i915 device instance
 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
 *
 * This function prepares the hardware power domain state before entering
 * system suspend.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
 */
void intel_power_domains_suspend(struct drm_i915_private *i915,
				 enum i915_drm_suspend_mode suspend_mode)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&power_domains->wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);

	/*
	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
	 * support don't manually deinit the power domains. This also means the
	 * CSR/DMC firmware will stay active, it will power down any HW
	 * resources as required and also enable deeper system power states
	 * that would be blocked if the firmware was inactive.
	 */
	if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
	    i915->csr.dmc_payload) {
		intel_display_power_flush_work(i915);
		intel_power_domains_verify_state(i915);
		return;
	}

	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells if power domains must be deinitialized for suspend.
	 */
	if (!i915_modparams.disable_power_well)
		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);

	intel_display_power_flush_work(i915);
	intel_power_domains_verify_state(i915);

	/* Platform-specific display core teardown. */
	if (INTEL_GEN(i915) >= 11)
		icl_display_core_uninit(i915);
	else if (IS_CANNONLAKE(i915))
		cnl_display_core_uninit(i915);
	else if (IS_GEN9_BC(i915))
		skl_display_core_uninit(i915);
	else if (IS_GEN9_LP(i915))
		bxt_display_core_uninit(i915);

	power_domains->display_core_suspended = true;
}
/**
 * intel_power_domains_resume - resume power domain state
 * @i915: i915 device instance
 *
 * This function resumes the hardware power domain state during system resume.
 *
 * It will return with power domain support disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_suspend().
 */
void intel_power_domains_resume(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	if (power_domains->display_core_suspended) {
		/* init_hw() re-takes the INIT reference into ->wakeref. */
		intel_power_domains_init_hw(i915, true);
		power_domains->display_core_suspended = false;
	} else {
		/* Core was never torn down; just re-take the INIT reference. */
		WARN_ON(power_domains->wakeref);
		power_domains->wakeref =
			intel_display_power_get(i915, POWER_DOMAIN_INIT);
	}

	intel_power_domains_verify_state(i915);
}
5088 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/* Dump every power well's refcount and per-domain use counts to the log. */
static void intel_power_domains_dump_info(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;

	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;

		DRM_DEBUG_DRIVER("%-25s %d\n",
				 power_well->desc->name, power_well->count);

		for_each_power_domain(domain, power_well->desc->domains)
			DRM_DEBUG_DRIVER("  %-23s %d\n",
					 intel_display_power_domain_str(domain),
					 power_domains->domain_use_count[domain]);
	}
}
/**
 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
 * @i915: i915 device instance
 *
 * Verify if the reference count of each power well matches its HW enabled
 * state and the total refcount of the domains it belongs to. This must be
 * called after modeset HW state sanitization, which is responsible for
 * acquiring reference counts for any power wells in use and disabling the
 * ones left on by BIOS but not required by any active output.
 */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;
	bool dump_domain_info;

	mutex_lock(&power_domains->lock);

	verify_async_put_domains_state(power_domains);

	dump_domain_info = false;
	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;
		int domains_count;
		bool enabled;

		/* Refcount (or the always-on flag) must agree with the HW state. */
		enabled = power_well->desc->ops->is_enabled(i915, power_well);
		if ((power_well->count || power_well->desc->always_on) !=
		    enabled)
			DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
				  power_well->desc->name,
				  power_well->count, enabled);

		/* The well's refcount must equal the sum of its domains' counts. */
		domains_count = 0;
		for_each_power_domain(domain, power_well->desc->domains)
			domains_count += power_domains->domain_use_count[domain];

		if (power_well->count != domains_count) {
			DRM_ERROR("power well %s refcount/domain refcount mismatch "
				  "(refcount %d/domains refcount %d)\n",
				  power_well->desc->name, power_well->count,
				  domains_count);
			dump_domain_info = true;
		}
	}

	if (dump_domain_info) {
		static bool dumped;

		/* Dump the full domain table at most once to avoid log spam. */
		if (!dumped) {
			intel_power_domains_dump_info(i915);
			dumped = true;
		}
	}

	mutex_unlock(&power_domains->lock);
}
/*
 * Without CONFIG_DRM_I915_DEBUG_RUNTIME_PM the verification is compiled out;
 * keep a no-op stub so callers stay unconditional.
 */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}
/* Late-suspend hook: enable DC9 (gen9 LP / gen11+) or PC8 (HSW/BDW). */
void intel_display_power_suspend_late(struct drm_i915_private *i915)
{
	if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 11)
		bxt_enable_dc9(i915);
	else if (IS_BROADWELL(i915) || IS_HASWELL(i915))
		hsw_enable_pc8(i915);
}
/*
 * Early-resume hook: sanitize the DC state and leave DC9 (gen9 LP / gen11+),
 * or leave PC8 (HSW/BDW). Mirrors intel_display_power_suspend_late().
 */
void intel_display_power_resume_early(struct drm_i915_private *i915)
{
	if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 11) {
		gen9_sanitize_dc_state(i915);
		bxt_disable_dc9(i915);
	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
		hsw_disable_pc8(i915);
	}
}
/*
 * Power down the display for suspend: uninit the display core where the
 * platform has one, then enter DC9 (gen9 LP / gen11+) or PC8 (HSW/BDW).
 */
void intel_display_power_suspend(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11) {
		icl_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_GEN9_LP(i915)) {
		bxt_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}
}
5205 void intel_display_power_resume(struct drm_i915_private *i915)
5207 if (INTEL_GEN(i915) >= 11) {
5208 bxt_disable_dc9(i915);
5209 icl_display_core_init(i915, true);
5210 if (i915->csr.dmc_payload) {
5211 if (i915->csr.allowed_dc_mask &
5212 DC_STATE_EN_UPTO_DC6)
5213 skl_enable_dc6(i915);
5214 else if (i915->csr.allowed_dc_mask &
5215 DC_STATE_EN_UPTO_DC5)
5216 gen9_enable_dc5(i915);
5218 } else if (IS_GEN9_LP(i915)) {
5219 bxt_disable_dc9(i915);
5220 bxt_display_core_init(i915, true);
5221 if (i915->csr.dmc_payload &&
5222 (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
5223 gen9_enable_dc5(i915);
5224 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
5225 hsw_disable_pc8(i915);