2 * Copyright © 2006-2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 #include "intel_display_types.h"
25 #include "intel_dpio_phy.h"
26 #include "intel_dpll_mgr.h"
31 * Display PLLs used for driving outputs vary by platform. While some have
32 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
33 * from a pool. In the latter scenario, it is possible that multiple pipes
34 * share a PLL if their configurations match.
36 * This file provides an abstraction over display PLLs. The function
37 * intel_shared_dpll_init() initializes the PLLs for the given platform. The
38 * users of a PLL are tracked and that tracking is integrated with the atomic
39 * modeset interface. During an atomic operation, required PLLs can be reserved
40 * for a given CRTC and encoder configuration by calling
41 * intel_reserve_shared_dplls() and previously reserved PLLs can be released
42 * with intel_release_shared_dplls().
43 * Changes to the users are first staged in the atomic state, and then made
44 * effective by calling intel_shared_dpll_swap_state() during the atomic
/*
 * Snapshot the live bookkeeping state of every shared DPLL into the
 * caller-provided @shared_dpll array (one entry per PLL).  Used to seed an
 * atomic state's DPLL array with the current configuration before staging
 * changes on top of it.
 */
49 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
50 struct intel_shared_dpll_state *shared_dpll)
54 /* Copy shared dpll state */
55 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
56 struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
/* Struct copy: duplicates crtc_mask and staged hw_state for PLL i. */
58 shared_dpll[i] = pll->state;
/*
 * Return the atomic state's shared-DPLL state array, lazily duplicating the
 * current live PLL state into it on first access (state->dpll_set guards the
 * one-time copy).  Must be called with connection_mutex held, as asserted.
 */
62 static struct intel_shared_dpll_state *
63 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
65 struct intel_atomic_state *state = to_intel_atomic_state(s);
67 WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
69 if (!state->dpll_set) {
70 state->dpll_set = true;
/* First access for this atomic state: copy the live PLL state in. */
72 intel_atomic_duplicate_dpll_state(to_i915(s->dev),
76 return state->shared_dpll;
80 * intel_get_shared_dpll_by_id - get a DPLL given its id
81 * @dev_priv: i915 device instance
85 * A pointer to the DPLL with @id
87 struct intel_shared_dpll *
88 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
89 enum intel_dpll_id id)
/* @id indexes the array directly; caller must pass a valid platform id. */
91 return &dev_priv->dpll.shared_dplls[id];
95 * intel_get_shared_dpll_id - get the id of a DPLL
96 * @dev_priv: i915 device instance
103 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
104 struct intel_shared_dpll *pll)
/* Recover the id from the pointer's offset into the shared_dplls array. */
106 long pll_idx = pll - dev_priv->dpll.shared_dplls;
/* Warn if @pll does not point into the array (error return elided here). */
108 if (drm_WARN_ON(&dev_priv->drm,
110 pll_idx >= dev_priv->dpll.num_shared_dpll))
/*
 * Assert that @pll's actual hardware enable state matches the expected
 * @state.  Reads the hardware via the PLL's get_hw_state() hook and emits an
 * I915_STATE_WARN on mismatch; warns and bails early if @pll is NULL.
 */
117 void assert_shared_dpll(struct drm_i915_private *dev_priv,
118 struct intel_shared_dpll *pll,
122 struct intel_dpll_hw_state hw_state;
124 if (drm_WARN(&dev_priv->drm, !pll,
125 "asserting DPLL %s with no DPLL\n", onoff(state)))
/* get_hw_state() returns the enable bit; hw_state itself is discarded. */
128 cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
129 I915_STATE_WARN(cur_state != state,
130 "%s assertion failure (expected %s, current %s)\n",
131 pll->info->name, onoff(state), onoff(cur_state));
135 * intel_prepare_shared_dpll - call a dpll's prepare hook
136 * @crtc_state: CRTC, and its state, which has a shared dpll
138 * This calls the PLL's prepare hook if it has one and if the PLL is not
139 * already enabled. The prepare hook is platform specific.
141 void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
143 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
144 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
145 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
147 if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
/* dpll.lock serializes prepare/enable/disable across CRTCs. */
150 mutex_lock(&dev_priv->dpll.lock);
/* The CRTC must already hold a reference (crtc_mask bit set). */
151 drm_WARN_ON(&dev_priv->drm, !pll->state.crtc_mask);
/* Only prepare while no CRTC has the PLL active; it must still be off. */
152 if (!pll->active_mask) {
153 drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
154 drm_WARN_ON(&dev_priv->drm, pll->on);
155 assert_shared_dpll_disabled(dev_priv, pll);
157 pll->info->funcs->prepare(dev_priv, pll);
159 mutex_unlock(&dev_priv->dpll.lock);
163 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
164 * @crtc_state: CRTC, and its state, which has a shared DPLL
166 * Enable the shared DPLL used by @crtc.
168 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
170 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
171 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
172 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
173 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
174 unsigned int old_mask;
176 if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
179 mutex_lock(&dev_priv->dpll.lock);
180 old_mask = pll->active_mask;
/*
 * Sanity: the CRTC must have reserved this PLL (crtc_mask) and must not
 * already have it marked active.
 */
182 if (drm_WARN_ON(&dev_priv->drm, !(pll->state.crtc_mask & crtc_mask)) ||
183 drm_WARN_ON(&dev_priv->drm, pll->active_mask & crtc_mask))
186 pll->active_mask |= crtc_mask;
188 drm_dbg_kms(&dev_priv->drm,
189 "enable %s (active %x, on? %d) for crtc %d\n",
190 pll->info->name, pll->active_mask, pll->on,
/*
 * If another CRTC already had the PLL active (old_mask non-empty) it must
 * already be on; only the first user actually enables the hardware.
 * (The branch structure is partially elided in this extract.)
 */
194 drm_WARN_ON(&dev_priv->drm, !pll->on);
195 assert_shared_dpll_enabled(dev_priv, pll);
198 drm_WARN_ON(&dev_priv->drm, pll->on);
200 drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
201 pll->info->funcs->enable(dev_priv, pll);
205 mutex_unlock(&dev_priv->dpll.lock);
209 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
210 * @crtc_state: CRTC, and its state, which has a shared DPLL
212 * Disable the shared DPLL used by @crtc.
214 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
216 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
217 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
218 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
219 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
221 /* PCH only available on ILK+ */
/* Pre-gen5 platforms have no shared DPLLs to disable; bail early. */
222 if (INTEL_GEN(dev_priv) < 5)
228 mutex_lock(&dev_priv->dpll.lock);
/* The CRTC must currently have the PLL active, otherwise nothing to do. */
229 if (drm_WARN_ON(&dev_priv->drm, !(pll->active_mask & crtc_mask)))
232 drm_dbg_kms(&dev_priv->drm,
233 "disable %s (active %x, on? %d) for crtc %d\n",
234 pll->info->name, pll->active_mask, pll->on,
237 assert_shared_dpll_enabled(dev_priv, pll);
238 drm_WARN_ON(&dev_priv->drm, !pll->on);
240 pll->active_mask &= ~crtc_mask;
/* Other CRTCs still using the PLL: leave the hardware enabled. */
241 if (pll->active_mask)
244 drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
245 pll->info->funcs->disable(dev_priv, pll);
249 mutex_unlock(&dev_priv->dpll.lock);
/*
 * Find a shared DPLL for @crtc out of the PLLs selected by @dpll_mask.
 * Preference order: a PLL whose staged hw state already matches @pll_state
 * (so the CRTC can share it), otherwise an unused PLL tracked in
 * @unused_pll.  Operates on the atomic state's staged DPLL array, not the
 * live hardware state.
 */
252 static struct intel_shared_dpll *
253 intel_find_shared_dpll(struct intel_atomic_state *state,
254 const struct intel_crtc *crtc,
255 const struct intel_dpll_hw_state *pll_state,
256 unsigned long dpll_mask)
258 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
259 struct intel_shared_dpll *pll, *unused_pll = NULL;
260 struct intel_shared_dpll_state *shared_dpll;
261 enum intel_dpll_id i;
263 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
/* @dpll_mask may only name PLL ids that exist on this platform. */
265 drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
267 for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
268 pll = &dev_priv->dpll.shared_dplls[i];
270 /* Only want to check enabled timings first */
271 if (shared_dpll[i].crtc_mask == 0) {
/* Full hw-state compare: identical programming means sharable. */
277 if (memcmp(pll_state,
278 &shared_dpll[i].hw_state,
279 sizeof(*pll_state)) == 0) {
280 drm_dbg_kms(&dev_priv->drm,
281 "[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
282 crtc->base.base.id, crtc->base.name,
284 shared_dpll[i].crtc_mask,
290 /* Ok no matching timings, maybe there's a free one? */
292 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
293 crtc->base.base.id, crtc->base.name,
294 unused_pll->info->name);
/*
 * Stage a reference from @crtc on @pll in the atomic state.  The first
 * referencing CRTC also stages @pll_state as the PLL's hw state; later
 * sharers leave it untouched (they matched it in intel_find_shared_dpll()).
 */
302 intel_reference_shared_dpll(struct intel_atomic_state *state,
303 const struct intel_crtc *crtc,
304 const struct intel_shared_dpll *pll,
305 const struct intel_dpll_hw_state *pll_state)
307 struct drm_i915_private *i915 = to_i915(state->base.dev);
308 struct intel_shared_dpll_state *shared_dpll;
309 const enum intel_dpll_id id = pll->info->id;
311 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
/* First user of this PLL in the atomic state sets its hw state. */
313 if (shared_dpll[id].crtc_mask == 0)
314 shared_dpll[id].hw_state = *pll_state;
316 drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
317 pipe_name(crtc->pipe));
/* Track the reference per-pipe in the staged crtc_mask. */
319 shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
/*
 * Drop @crtc's staged reference on @pll: clear its pipe bit in the atomic
 * state's crtc_mask for that PLL.  Counterpart of
 * intel_reference_shared_dpll().
 */
322 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
323 const struct intel_crtc *crtc,
324 const struct intel_shared_dpll *pll)
326 struct intel_shared_dpll_state *shared_dpll;
328 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
329 shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
/*
 * Release the DPLL previously used by @crtc: clear the new state's PLL
 * pointer and, if the old state actually had one, drop its staged
 * reference.  Used as the platform .put_dplls() hook.
 */
332 static void intel_put_dpll(struct intel_atomic_state *state,
333 struct intel_crtc *crtc)
335 const struct intel_crtc_state *old_crtc_state =
336 intel_atomic_get_old_crtc_state(state, crtc);
337 struct intel_crtc_state *new_crtc_state =
338 intel_atomic_get_new_crtc_state(state, crtc);
340 new_crtc_state->shared_dpll = NULL;
/* Nothing to release if the CRTC had no PLL before this commit. */
342 if (!old_crtc_state->shared_dpll)
345 intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
349 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
350 * @state: atomic state
352 * This is the dpll version of drm_atomic_helper_swap_state() since the
353 * helper does not handle driver-specific global state.
355 * For consistency with atomic helpers this function does a complete swap,
356 * i.e. it also puts the current state into @state, even though there is no
357 * need for that at this moment.
359 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
361 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
362 struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
363 enum intel_dpll_id i;
/* No DPLL state was staged in this atomic state: nothing to swap. */
365 if (!state->dpll_set)
368 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
369 struct intel_shared_dpll *pll =
370 &dev_priv->dpll.shared_dplls[i];
/* Exchange staged and live state for PLL i (full swap, see above). */
372 swap(pll->state, shared_dpll[i]);
/*
 * Read back the IBX/CPT PCH DPLL hardware state (DPLL control plus FP0/FP1
 * dividers) into @hw_state.  Takes a display-core power reference for the
 * register reads; returns whether the PLL's VCO is enabled.
 */
376 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
377 struct intel_shared_dpll *pll,
378 struct intel_dpll_hw_state *hw_state)
380 const enum intel_dpll_id id = pll->info->id;
381 intel_wakeref_t wakeref;
/* Bail (power well off => PLL off) if display power is not up. */
384 wakeref = intel_display_power_get_if_enabled(dev_priv,
385 POWER_DOMAIN_DISPLAY_CORE);
389 val = intel_de_read(dev_priv, PCH_DPLL(id));
390 hw_state->dpll = val;
391 hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
392 hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
394 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
396 return val & DPLL_VCO_ENABLE;
/*
 * Program the FP0/FP1 divider registers from the staged hw state before the
 * PLL is enabled; the DPLL control register itself is written in the enable
 * hook.
 */
399 static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
400 struct intel_shared_dpll *pll)
402 const enum intel_dpll_id id = pll->info->id;
404 intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
405 intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
/*
 * Assert that the PCH reference clock is running: at least one of the SSC /
 * non-spread / superspread source fields must be selected in
 * PCH_DREF_CONTROL.  Only meaningful on IBX/CPT PCHs, as asserted first.
 */
408 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
413 I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
415 val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
416 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
417 DREF_SUPERSPREAD_SOURCE_MASK));
418 I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
/*
 * Enable an IBX/CPT PCH DPLL: verify the refclk is up, write the staged
 * DPLL control value, wait for clock stabilization, then rewrite the same
 * value so the pixel multiplier takes effect (hardware requires the second
 * write after the clocks are stable; delays between steps are elided in
 * this extract).
 */
421 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
422 struct intel_shared_dpll *pll)
424 const enum intel_dpll_id id = pll->info->id;
426 /* PCH refclock must be enabled first */
427 ibx_assert_pch_refclk_enabled(dev_priv);
429 intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
431 /* Wait for the clocks to stabilize. */
432 intel_de_posting_read(dev_priv, PCH_DPLL(id));
435 /* The pixel multiplier can only be updated once the
436 * DPLL is enabled and the clocks are stable.
440 intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
441 intel_de_posting_read(dev_priv, PCH_DPLL(id));
/*
 * Disable an IBX/CPT PCH DPLL by clearing its control register; the posting
 * read flushes the write before returning.
 */
445 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
446 struct intel_shared_dpll *pll)
448 const enum intel_dpll_id id = pll->info->id;
450 intel_de_write(dev_priv, PCH_DPLL(id), 0);
451 intel_de_posting_read(dev_priv, PCH_DPLL(id));
/*
 * .get_dplls() hook for IBX/CPT: pick a PCH DPLL for @crtc.  Ironlake's
 * IBX PCH hard-wires pipe N to PLL N, so the PLL is taken directly by pipe
 * index; otherwise (CPT) any of PCH PLL A/B is searched for a match or a
 * free slot.  On success the PLL is referenced in the atomic state and
 * stored in the CRTC state.
 */
455 static bool ibx_get_dpll(struct intel_atomic_state *state,
456 struct intel_crtc *crtc,
457 struct intel_encoder *encoder)
459 struct intel_crtc_state *crtc_state =
460 intel_atomic_get_new_crtc_state(state, crtc)state, crtc);
461 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
462 struct intel_shared_dpll *pll;
463 enum intel_dpll_id i;
465 if (HAS_PCH_IBX(dev_priv)) {
466 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
467 i = (enum intel_dpll_id) crtc->pipe;
468 pll = &dev_priv->dpll.shared_dplls[i];
470 drm_dbg_kms(&dev_priv->drm,
471 "[CRTC:%d:%s] using pre-allocated %s\n",
472 crtc->base.base.id, crtc->base.name,
475 pll = intel_find_shared_dpll(state, crtc,
476 &crtc_state->dpll_hw_state,
477 BIT(DPLL_ID_PCH_PLL_B) |
478 BIT(DPLL_ID_PCH_PLL_A));
484 /* reference the pll */
485 intel_reference_shared_dpll(state, crtc,
486 pll, &crtc_state->dpll_hw_state);
488 crtc_state->shared_dpll = pll;
/* Dump an IBX DPLL hw state (dpll/dpll_md/fp0/fp1) to the KMS debug log. */
493 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
494 const struct intel_dpll_hw_state *hw_state)
496 drm_dbg_kms(&dev_priv->drm,
497 "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
498 "fp0: 0x%x, fp1: 0x%x\n",
/* Hardware hooks for the IBX/CPT PCH DPLLs. */
505 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
506 .prepare = ibx_pch_dpll_prepare,
507 .enable = ibx_pch_dpll_enable,
508 .disable = ibx_pch_dpll_disable,
509 .get_hw_state = ibx_pch_dpll_get_hw_state,
/*
 * Enable a Haswell/Broadwell WRPLL by writing the staged WRPLL_CTL value
 * (which includes the enable bit) and flushing with a posting read.
 */
512 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
513 struct intel_shared_dpll *pll)
515 const enum intel_dpll_id id = pll->info->id;
517 intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
518 intel_de_posting_read(dev_priv, WRPLL_CTL(id));
/* Enable the (single) Haswell SPLL by writing the staged SPLL_CTL value. */
522 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
523 struct intel_shared_dpll *pll)
525 intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
526 intel_de_posting_read(dev_priv, SPLL_CTL);
/*
 * Disable a Haswell/Broadwell WRPLL: clear its enable bit via
 * read-modify-write of WRPLL_CTL.  If this PLL was a consumer of the PCH
 * SSC reference (pch_ssc_use bit for this id), reconfigure the PCH refclk
 * now that the PLL no longer needs it.
 */
530 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
531 struct intel_shared_dpll *pll)
533 const enum intel_dpll_id id = pll->info->id;
536 val = intel_de_read(dev_priv, WRPLL_CTL(id));
537 intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
538 intel_de_posting_read(dev_priv, WRPLL_CTL(id));
541 * Try to set up the PCH reference clock once all DPLLs
542 * that depend on it have been shut down.
544 if (dev_priv->pch_ssc_use & BIT(id))
545 intel_init_pch_refclk(dev_priv);
/*
 * Disable the Haswell SPLL: clear its enable bit via read-modify-write of
 * SPLL_CTL, then, as for the WRPLLs, re-init the PCH refclk if this PLL was
 * using the SSC reference.
 */
548 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
549 struct intel_shared_dpll *pll)
551 enum intel_dpll_id id = pll->info->id;
554 val = intel_de_read(dev_priv, SPLL_CTL);
555 intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
556 intel_de_posting_read(dev_priv, SPLL_CTL);
559 * Try to set up the PCH reference clock once all DPLLs
560 * that depend on it have been shut down.
562 if (dev_priv->pch_ssc_use & BIT(id))
563 intel_init_pch_refclk(dev_priv);
/*
 * Read back a WRPLL's control register into @hw_state under a display-core
 * power reference; returns whether the PLL is enabled.
 */
566 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
567 struct intel_shared_dpll *pll,
568 struct intel_dpll_hw_state *hw_state)
570 const enum intel_dpll_id id = pll->info->id;
571 intel_wakeref_t wakeref;
/* Bail early if display power is down; the PLL is then off by definition. */
574 wakeref = intel_display_power_get_if_enabled(dev_priv,
575 POWER_DOMAIN_DISPLAY_CORE);
579 val = intel_de_read(dev_priv, WRPLL_CTL(id));
580 hw_state->wrpll = val;
582 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
584 return val & WRPLL_PLL_ENABLE;
/*
 * Read back the SPLL control register into @hw_state under a display-core
 * power reference; returns whether the SPLL is enabled.
 */
587 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
588 struct intel_shared_dpll *pll,
589 struct intel_dpll_hw_state *hw_state)
591 intel_wakeref_t wakeref;
594 wakeref = intel_display_power_get_if_enabled(dev_priv,
595 POWER_DOMAIN_DISPLAY_CORE);
599 val = intel_de_read(dev_priv, SPLL_CTL);
600 hw_state->spll = val;
602 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
604 return val & SPLL_PLL_ENABLE;
/*
 * WRPLL divider search machinery.  LC_FREQ_2K is the LC PLL reference in
 * units of 2 kHz; hsw_wrpll_rnp holds a candidate (p, n2, r2) divider
 * triple.  The budget table and most of hsw_wrpll_get_budget_for_freq()'s
 * body are elided from this extract.
 */
608 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
614 /* Constraints for PLL good behavior */
620 struct hsw_wrpll_rnp {
624 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
/*
 * Compare the candidate divider triple (r2, n2, p) against the current
 * *best and update *best if the candidate is preferable.  The comparison
 * works in the scaled integer domain described in the comment below, with
 * a = budget term of the candidate, b = budget term of the best so far,
 * and c/d the corresponding scaled frequency errors (the assignment of c
 * is elided from this extract).
 */
698 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
699 unsigned int r2, unsigned int n2,
701 struct hsw_wrpll_rnp *best)
703 u64 a, b, c, d, diff, diff_best;
705 /* No best (r,n,p) yet */
714 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
718 * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
721 * and we would like delta <= budget.
723 * If the discrepancy is above the PPM-based budget, always prefer to
724 * improve upon the previous solution. However, if you're within the
725 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
727 a = freq2k * budget * p * r2;
728 b = freq2k * budget * best->p * best->r2;
729 diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
730 diff_best = abs_diff(freq2k * best->p * best->r2,
731 LC_FREQ_2K * best->n2);
733 d = 1000000 * diff_best;
735 if (a < c && b < d) {
736 /* If both are above the budget, pick the closer */
737 if (best->p * best->r2 * diff < p * r2 * diff_best) {
742 } else if (a >= c && b < d) {
743 /* If A is below the threshold but B is above it? Update. */
747 } else if (a >= c && b >= d) {
748 /* Both are below the limit, so pick the higher n2/(r2*r2) */
749 if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
755 /* Otherwise a < c && b >= d, do nothing */
/*
 * Exhaustively search WRPLL dividers (r2, n2, p) for the requested pixel
 * @clock (in Hz) and return the winning triple through the out parameters.
 * freq2k is the target in 2 kHz units; the search ranges for r2 and n2 are
 * derived from the reference/VCO constraints documented inline.  The
 * 540 MHz special case bypasses the WRPLL and feeds the LC PLL directly.
 */
759 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
760 unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
764 struct hsw_wrpll_rnp best = { 0, 0, 0 };
767 freq2k = clock / 100;
769 budget = hsw_wrpll_get_budget_for_freq(clock);
771 /* Special case handling for 540 pixel clock: bypass WR PLL entirely
772 * and directly pass the LC PLL to it. */
773 if (freq2k == 5400000) {
781 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
784 * We want R so that REF_MIN <= Ref <= REF_MAX.
785 * Injecting R2 = 2 * R gives:
786 * REF_MAX * r2 > LC_FREQ * 2 and
787 * REF_MIN * r2 < LC_FREQ * 2
789 * Which means the desired boundaries for r2 are:
790 * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
793 for (r2 = LC_FREQ * 2 / REF_MAX + 1;
794 r2 <= LC_FREQ * 2 / REF_MIN;
798 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
800 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
801 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
802 * VCO_MAX * r2 > n2 * LC_FREQ and
803 * VCO_MIN * r2 < n2 * LC_FREQ)
805 * Which means the desired boundaries for n2 are:
806 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
808 for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
809 n2 <= VCO_MAX * r2 / LC_FREQ;
812 for (p = P_MIN; p <= P_MAX; p += P_INC)
813 hsw_wrpll_update_rnp(freq2k, budget,
/*
 * Compute the WRPLL_CTL value for an HDMI mode (port_clock is in kHz, hence
 * the * 1000 to Hz) and find a matching or free WRPLL (WRPLL1/WRPLL2) for
 * @crtc.  The computed value is staged in crtc_state->dpll_hw_state.wrpll
 * before the search so intel_find_shared_dpll() can match it.
 */
823 static struct intel_shared_dpll *
824 hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
825 struct intel_crtc *crtc)
827 struct intel_crtc_state *crtc_state =
828 intel_atomic_get_new_crtc_state(state, crtc);
829 struct intel_shared_dpll *pll;
831 unsigned int p, n2, r2;
833 hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
835 val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
836 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
837 WRPLL_DIVIDER_POST(p);
839 crtc_state->dpll_hw_state.wrpll = val;
841 pll = intel_find_shared_dpll(state, crtc,
842 &crtc_state->dpll_hw_state,
843 BIT(DPLL_ID_WRPLL2) |
844 BIT(DPLL_ID_WRPLL1));
/*
 * For DP on Haswell the LCPLL runs at fixed frequencies; map the link
 * clock (port_clock, in kHz — the elided switch divides by 2 to get the
 * link rate) to the corresponding fixed LCPLL id, or log and fail for an
 * unsupported clock.
 */
852 static struct intel_shared_dpll *
853 hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
855 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
856 struct intel_shared_dpll *pll;
857 enum intel_dpll_id pll_id;
858 int clock = crtc_state->port_clock;
862 pll_id = DPLL_ID_LCPLL_810;
865 pll_id = DPLL_ID_LCPLL_1350;
868 pll_id = DPLL_ID_LCPLL_2700;
871 drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
876 pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
/*
 * .get_dplls() hook for Haswell/Broadwell: route by output type.
 * HDMI -> WRPLL search, DP -> fixed LCPLL, analog (CRT) -> SPLL at
 * 1350 MHz (which requires port_clock/2 == 135000).  On success the chosen
 * PLL is referenced in the atomic state and stored in the CRTC state.
 */
884 static bool hsw_get_dpll(struct intel_atomic_state *state,
885 struct intel_crtc *crtc,
886 struct intel_encoder *encoder)
888 struct intel_crtc_state *crtc_state =
889 intel_atomic_get_new_crtc_state(state, crtc);
890 struct intel_shared_dpll *pll;
/* Start from a clean hw state; only the relevant field gets filled in. */
892 memset(&crtc_state->dpll_hw_state, 0,
893 sizeof(crtc_state->dpll_hw_state));
895 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
896 pll = hsw_ddi_hdmi_get_dpll(state, crtc);
897 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
898 pll = hsw_ddi_dp_get_dpll(crtc_state);
899 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
900 if (WARN_ON(crtc_state->port_clock / 2 != 135000))
903 crtc_state->dpll_hw_state.spll =
904 SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
906 pll = intel_find_shared_dpll(state, crtc,
907 &crtc_state->dpll_hw_state,
916 intel_reference_shared_dpll(state, crtc,
917 pll, &crtc_state->dpll_hw_state);
919 crtc_state->shared_dpll = pll;
/* Dump a Haswell DPLL hw state (wrpll/spll control words) to the debug log. */
924 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
925 const struct intel_dpll_hw_state *hw_state)
927 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
928 hw_state->wrpll, hw_state->spll);
/* Hardware hooks for the HSW/BDW WRPLLs (no prepare step needed). */
931 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
932 .enable = hsw_ddi_wrpll_enable,
933 .disable = hsw_ddi_wrpll_disable,
934 .get_hw_state = hsw_ddi_wrpll_get_hw_state,
/* Hardware hooks for the single HSW SPLL. */
937 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
938 .enable = hsw_ddi_spll_enable,
939 .disable = hsw_ddi_spll_disable,
940 .get_hw_state = hsw_ddi_spll_get_hw_state,
/*
 * The fixed-frequency LCPLLs are managed by the CDCLK code, not here; the
 * enable/disable hooks are intentional no-ops and get_hw_state reports only
 * the (elided) constant result.
 */
943 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
944 struct intel_shared_dpll *pll)
948 static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
949 struct intel_shared_dpll *pll)
953 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
954 struct intel_shared_dpll *pll,
955 struct intel_dpll_hw_state *hw_state)
/* Hook table for the always-on LCPLL "PLLs". */
960 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
961 .enable = hsw_ddi_lcpll_enable,
962 .disable = hsw_ddi_lcpll_disable,
963 .get_hw_state = hsw_ddi_lcpll_get_hw_state,
/*
 * Per-PLL register triples (control, CFGCR1, CFGCR2) for Skylake.  DPLL0's
 * entry has no CFGCR registers since it doesn't support HDMI mode.
 */
966 struct skl_dpll_regs {
967 i915_reg_t ctl, cfgcr1, cfgcr2;
970 /* this array is indexed by the *shared* pll id */
971 static const struct skl_dpll_regs skl_dpll_regs[4] = {
975 /* DPLL 0 doesn't support HDMI mode */
980 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
981 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
986 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
987 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
992 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
993 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
/*
 * Update this PLL's 6-bit field inside the shared DPLL_CTRL1 register via
 * read-modify-write: clear the HDMI-mode, SSC, and link-rate bits for @id,
 * then OR in the staged ctrl1 value shifted to the PLL's slot (id * 6).
 */
997 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
998 struct intel_shared_dpll *pll)
1000 const enum intel_dpll_id id = pll->info->id;
1003 val = intel_de_read(dev_priv, DPLL_CTRL1);
1005 val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
1006 DPLL_CTRL1_SSC(id) |
1007 DPLL_CTRL1_LINK_RATE_MASK(id));
1008 val |= pll->state.hw_state.ctrl1 << (id * 6);
1010 intel_de_write(dev_priv, DPLL_CTRL1, val);
1011 intel_de_posting_read(dev_priv, DPLL_CTRL1);
/*
 * Enable a Skylake DPLL (1-3): program its DPLL_CTRL1 field and CFGCR1/2
 * registers from the staged state, set the enable bit in the per-PLL
 * control register, then wait (up to 5 ms) for the lock bit in DPLL_STATUS.
 */
1014 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
1015 struct intel_shared_dpll *pll)
1017 const struct skl_dpll_regs *regs = skl_dpll_regs;
1018 const enum intel_dpll_id id = pll->info->id;
1020 skl_ddi_pll_write_ctrl1(dev_priv, pll);
1022 intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
1023 intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
1024 intel_de_posting_read(dev_priv, regs[id].cfgcr1);
1025 intel_de_posting_read(dev_priv, regs[id].cfgcr2);
1027 /* the enable bit is always bit 31 */
1028 intel_de_write(dev_priv, regs[id].ctl,
1029 intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE)
1031 if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
1032 drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
/*
 * DPLL0 is enabled/disabled by the CDCLK code (it drives CDCLK); here we
 * only program its DPLL_CTRL1 field.
 */
1035 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
1036 struct intel_shared_dpll *pll)
1038 skl_ddi_pll_write_ctrl1(dev_priv, pll);
/* Disable a Skylake DPLL (1-3) by clearing bit 31 of its control register. */
1041 static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
1042 struct intel_shared_dpll *pll)
1044 const struct skl_dpll_regs *regs = skl_dpll_regs;
1045 const enum intel_dpll_id id = pll->info->id;
1047 /* the enable bit is always bit 31 */
1048 intel_de_write(dev_priv, regs[id].ctl,
1049 intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
1050 intel_de_posting_read(dev_priv, regs[id].ctl);
/* No-op: DPLL0 must stay enabled as the CDCLK source. */
1053 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
1054 struct intel_shared_dpll *pll)
/*
 * Read back a Skylake DPLL's (1-3) state: its 6-bit DPLL_CTRL1 field and,
 * only when HDMI mode is enabled there, the CFGCR1/2 registers (they read
 * back stale values otherwise).  Runs under a display-core power wakeref;
 * returns false if the PLL (or display power) is off.
 */
1058 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1059 struct intel_shared_dpll *pll,
1060 struct intel_dpll_hw_state *hw_state)
1063 const struct skl_dpll_regs *regs = skl_dpll_regs;
1064 const enum intel_dpll_id id = pll->info->id;
1065 intel_wakeref_t wakeref;
1068 wakeref = intel_display_power_get_if_enabled(dev_priv,
1069 POWER_DOMAIN_DISPLAY_CORE);
1075 val = intel_de_read(dev_priv, regs[id].ctl);
1076 if (!(val & LCPLL_PLL_ENABLE))
/* Extract this PLL's 6-bit slot out of the shared DPLL_CTRL1. */
1079 val = intel_de_read(dev_priv, DPLL_CTRL1);
1080 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1082 /* avoid reading back stale values if HDMI mode is not enabled */
1083 if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1084 hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
1085 hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
1090 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/*
 * Read back DPLL0's state.  DPLL0 must always be enabled (it drives CDCLK),
 * so finding it disabled is a WARN; only its DPLL_CTRL1 field is captured
 * since DPLL0 has no CFGCR registers.
 */
1095 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
1096 struct intel_shared_dpll *pll,
1097 struct intel_dpll_hw_state *hw_state)
1099 const struct skl_dpll_regs *regs = skl_dpll_regs;
1100 const enum intel_dpll_id id = pll->info->id;
1101 intel_wakeref_t wakeref;
1105 wakeref = intel_display_power_get_if_enabled(dev_priv,
1106 POWER_DOMAIN_DISPLAY_CORE);
1112 /* DPLL0 is always enabled since it drives CDCLK */
1113 val = intel_de_read(dev_priv, regs[id].ctl);
1114 if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
1117 val = intel_de_read(dev_priv, DPLL_CTRL1);
1118 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1123 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/*
 * Search state for the SKL WRPLL divider search: the best deviation found
 * so far and the (central_freq, dco_freq, p) that achieved it.
 */
1128 struct skl_wrpll_context {
1129 u64 min_deviation; /* current minimal deviation */
1130 u64 central_freq; /* chosen central freq */
1131 u64 dco_freq; /* chosen dco freq */
1132 unsigned int p; /* chosen divider */
/* Reset the search: no candidate yet, so min_deviation starts at U64_MAX. */
1135 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1137 memset(ctx, 0, sizeof(*ctx));
1139 ctx->min_deviation = U64_MAX;
1142 /* DCO freq must be within +1%/-6% of the DCO central freq */
1143 #define SKL_DCO_MAX_PDEVIATION 100
1144 #define SKL_DCO_MAX_NDEVIATION 600
/*
 * Evaluate one candidate divider: compute the DCO frequency's deviation
 * from the central frequency (in 0.01% units) and record the candidate in
 * @ctx if it is within the allowed band (+1% / -6%, asymmetric) and better
 * than the best so far.
 */
1146 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1149 unsigned int divider)
/* deviation = |dco - central| / central, scaled by 10000. */
1153 deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1156 /* positive deviation */
1157 if (dco_freq >= central_freq) {
1158 if (deviation < SKL_DCO_MAX_PDEVIATION &&
1159 deviation < ctx->min_deviation) {
1160 ctx->min_deviation = deviation;
1161 ctx->central_freq = central_freq;
1162 ctx->dco_freq = dco_freq;
1165 /* negative deviation */
1166 } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1167 deviation < ctx->min_deviation) {
1168 ctx->min_deviation = deviation;
1169 ctx->central_freq = central_freq;
1170 ctx->dco_freq = dco_freq;
/*
 * Decompose the overall post-divider @p into the three hardware dividers
 * p0 * p1 * p2.  Even values of p are split via p/2; odd values come from
 * the small fixed set {3, 5, 7, 9, 15, 21, 35} handled case by case.
 */
1175 static void skl_wrpll_get_multipliers(unsigned int p,
1176 unsigned int *p0 /* out */,
1177 unsigned int *p1 /* out */,
1178 unsigned int *p2 /* out */)
1182 unsigned int half = p / 2;
1184 if (half == 1 || half == 2 || half == 3 || half == 5) {
1188 } else if (half % 2 == 0) {
1192 } else if (half % 3 == 0) {
1196 } else if (half % 7 == 0) {
1201 } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
1205 } else if (p == 5 || p == 7) {
1209 } else if (p == 15) {
1213 } else if (p == 21) {
1217 } else if (p == 35) {
1224 struct skl_wrpll_params {
/*
 * Encode the chosen central frequency and divider triple (p0, p1, p2) into
 * the register-field values of @params: central_freq selector, pdiv/kdiv
 * encodings (elided switches, WARNing on unencodable values), qdiv, and
 * the integer/fractional DCO programming derived from p0*p1*p2*afe_clock.
 */
1234 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1237 u32 p0, u32 p1, u32 p2)
/* Map the DCO central frequency to its 2-bit register encoding. */
1241 switch (central_freq) {
1243 params->central_freq = 0;
1246 params->central_freq = 1;
1249 params->central_freq = 3;
1266 WARN(1, "Incorrect PDiv\n");
1283 WARN(1, "Incorrect KDiv\n");
1286 params->qdiv_ratio = p1;
/* qdiv_mode: bypass (0) when the ratio is 1, enabled (1) otherwise. */
1287 params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1289 dco_freq = p0 * p1 * p2 * afe_clock;
1292 * Intermediate values are in Hz.
1293 * Divide by MHz to match bsepc
1295 params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
1296 params->dco_fraction =
1297 div_u64((div_u64(dco_freq, 24) -
1298 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
/*
 * Find WRPLL parameters for the given pixel @clock (Hz) on Skylake.  The
 * AFE clock is 5x the pixel clock; the search tries every (divider set,
 * central frequency, divider) combination, preferring even dividers over
 * odd ones, and stops early on an exact (zero-deviation) match.  The
 * winning divider is decomposed and encoded into @wrpll_params.
 */
1302 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1303 struct skl_wrpll_params *wrpll_params)
1305 u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1306 u64 dco_central_freq[3] = { 8400000000ULL,
1309 static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
1310 24, 28, 30, 32, 36, 40, 42, 44,
1311 48, 52, 54, 56, 60, 64, 66, 68,
1312 70, 72, 76, 78, 80, 84, 88, 90,
1314 static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1315 static const struct {
1319 { even_dividers, ARRAY_SIZE(even_dividers) },
1320 { odd_dividers, ARRAY_SIZE(odd_dividers) },
1322 struct skl_wrpll_context ctx;
1323 unsigned int dco, d, i;
1324 unsigned int p0, p1, p2;
1326 skl_wrpll_context_init(&ctx);
1328 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1329 for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1330 for (i = 0; i < dividers[d].n_dividers; i++) {
1331 unsigned int p = dividers[d].list[i];
1332 u64 dco_freq = p * afe_clock;
1334 skl_wrpll_try_divider(&ctx,
1335 dco_central_freq[dco],
1339 * Skip the remaining dividers if we're sure to
1340 * have found the definitive divider, we can't
1341 * improve a 0 deviation.
1343 if (ctx.min_deviation == 0)
1344 goto skip_remaining_dividers;
1348 skip_remaining_dividers:
1350 * If a solution is found with an even divider, prefer
1353 if (d == 0 && ctx.p)
1358 DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
1363 * gcc incorrectly analyses that these can be used without being
1364 * initialized. To be fair, it's hard to guess.
1367 skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1368 skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
/*
 * Compute and stage the SKL DPLL programming (ctrl1, cfgcr1, cfgcr2) for an
 * HDMI mode.  DPLL id 0 is always used as the field index in these macros
 * (see the comment in intel_dpll_hw_state); port_clock is in kHz, hence the
 * * 1000 conversion to Hz for the WRPLL calculation.
 */
1374 static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1376 u32 ctrl1, cfgcr1, cfgcr2;
1377 struct skl_wrpll_params wrpll_params = { 0, };
1380 * See comment in intel_dpll_hw_state to understand why we always use 0
1381 * as the DPLL id in this function.
1383 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1385 ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1387 if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1391 cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1392 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1393 wrpll_params.dco_integer;
1395 cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1396 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1397 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1398 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1399 wrpll_params.central_freq;
/* Stage the computed values in the CRTC state for the PLL search. */
1401 memset(&crtc_state->dpll_hw_state, 0,
1402 sizeof(crtc_state->dpll_hw_state));
1404 crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1405 crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1406 crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
/*
 * Stage the SKL DPLL ctrl1 value for a DP link: map the link rate
 * (port_clock / 2, in 10 kHz units per the macro encodings) to the fixed
 * DPLL_CTRL1 link-rate selector.  As in the HDMI path, field index 0 is
 * used throughout (see the comment in intel_dpll_hw_state).
 */
1411 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1416 * See comment in intel_dpll_hw_state to understand why we always use 0
1417 * as the DPLL id in this function.
1419 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1420 switch (crtc_state->port_clock / 2) {
1422 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1425 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1428 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1432 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1435 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1438 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1442 memset(&crtc_state->dpll_hw_state, 0,
1443 sizeof(crtc_state->dpll_hw_state));
1445 crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1450 static bool skl_get_dpll(struct intel_atomic_state *state,
1451 struct intel_crtc *crtc,
1452 struct intel_encoder *encoder)
1454 struct intel_crtc_state *crtc_state =
1455 intel_atomic_get_new_crtc_state(state, crtc);
1456 struct drm_i915_private *i915 = to_i915(crtc->base.dev);
1457 struct intel_shared_dpll *pll;
1460 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1461 bret = skl_ddi_hdmi_pll_dividers(crtc_state);
1463 drm_dbg_kms(&i915->drm,
1464 "Could not get HDMI pll dividers.\n");
1467 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
1468 bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
1470 drm_dbg_kms(&i915->drm,
1471 "Could not set DP dpll HW state.\n");
1478 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1479 pll = intel_find_shared_dpll(state, crtc,
1480 &crtc_state->dpll_hw_state,
1481 BIT(DPLL_ID_SKL_DPLL0));
1483 pll = intel_find_shared_dpll(state, crtc,
1484 &crtc_state->dpll_hw_state,
1485 BIT(DPLL_ID_SKL_DPLL3) |
1486 BIT(DPLL_ID_SKL_DPLL2) |
1487 BIT(DPLL_ID_SKL_DPLL1));
1491 intel_reference_shared_dpll(state, crtc,
1492 pll, &crtc_state->dpll_hw_state);
1494 crtc_state->shared_dpll = pll;
1499 static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
1500 const struct intel_dpll_hw_state *hw_state)
1502 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
1503 "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1509 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1510 .enable = skl_ddi_pll_enable,
1511 .disable = skl_ddi_pll_disable,
1512 .get_hw_state = skl_ddi_pll_get_hw_state,
1515 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1516 .enable = skl_ddi_dpll0_enable,
1517 .disable = skl_ddi_dpll0_disable,
1518 .get_hw_state = skl_ddi_dpll0_get_hw_state,
1521 static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1522 struct intel_shared_dpll *pll)
1525 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1527 enum dpio_channel ch;
1529 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1531 /* Non-SSC reference */
1532 temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1533 temp |= PORT_PLL_REF_SEL;
1534 intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1536 if (IS_GEMINILAKE(dev_priv)) {
1537 temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1538 temp |= PORT_PLL_POWER_ENABLE;
1539 intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1541 if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
1542 PORT_PLL_POWER_STATE), 200))
1543 drm_err(&dev_priv->drm,
1544 "Power state not set for PLL:%d\n", port);
1547 /* Disable 10 bit clock */
1548 temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
1549 temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1550 intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
1553 temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
1554 temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
1555 temp |= pll->state.hw_state.ebb0;
1556 intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);
1558 /* Write M2 integer */
1559 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
1560 temp &= ~PORT_PLL_M2_MASK;
1561 temp |= pll->state.hw_state.pll0;
1562 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);
1565 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
1566 temp &= ~PORT_PLL_N_MASK;
1567 temp |= pll->state.hw_state.pll1;
1568 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);
1570 /* Write M2 fraction */
1571 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
1572 temp &= ~PORT_PLL_M2_FRAC_MASK;
1573 temp |= pll->state.hw_state.pll2;
1574 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);
1576 /* Write M2 fraction enable */
1577 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
1578 temp &= ~PORT_PLL_M2_FRAC_ENABLE;
1579 temp |= pll->state.hw_state.pll3;
1580 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);
1583 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
1584 temp &= ~PORT_PLL_PROP_COEFF_MASK;
1585 temp &= ~PORT_PLL_INT_COEFF_MASK;
1586 temp &= ~PORT_PLL_GAIN_CTL_MASK;
1587 temp |= pll->state.hw_state.pll6;
1588 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);
1590 /* Write calibration val */
1591 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
1592 temp &= ~PORT_PLL_TARGET_CNT_MASK;
1593 temp |= pll->state.hw_state.pll8;
1594 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);
1596 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
1597 temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
1598 temp |= pll->state.hw_state.pll9;
1599 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);
1601 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
1602 temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
1603 temp &= ~PORT_PLL_DCO_AMP_MASK;
1604 temp |= pll->state.hw_state.pll10;
1605 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);
1607 /* Recalibrate with new settings */
1608 temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
1609 temp |= PORT_PLL_RECALIBRATE;
1610 intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
1611 temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1612 temp |= pll->state.hw_state.ebb4;
1613 intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
1616 temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1617 temp |= PORT_PLL_ENABLE;
1618 intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1619 intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1621 if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
1623 drm_err(&dev_priv->drm, "PLL %d not locked\n", port);
1625 if (IS_GEMINILAKE(dev_priv)) {
1626 temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
1627 temp |= DCC_DELAY_RANGE_2;
1628 intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
1632 * While we write to the group register to program all lanes at once we
1633 * can read only lane registers and we pick lanes 0/1 for that.
1635 temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
1636 temp &= ~LANE_STAGGER_MASK;
1637 temp &= ~LANESTAGGER_STRAP_OVRD;
1638 temp |= pll->state.hw_state.pcsdw12;
1639 intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
1642 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
1643 struct intel_shared_dpll *pll)
1645 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1648 temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1649 temp &= ~PORT_PLL_ENABLE;
1650 intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1651 intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1653 if (IS_GEMINILAKE(dev_priv)) {
1654 temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1655 temp &= ~PORT_PLL_POWER_ENABLE;
1656 intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
1658 if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
1659 PORT_PLL_POWER_STATE), 200))
1660 drm_err(&dev_priv->drm,
1661 "Power state not reset for PLL:%d\n", port);
1665 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1666 struct intel_shared_dpll *pll,
1667 struct intel_dpll_hw_state *hw_state)
1669 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1670 intel_wakeref_t wakeref;
1672 enum dpio_channel ch;
1676 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1678 wakeref = intel_display_power_get_if_enabled(dev_priv,
1679 POWER_DOMAIN_DISPLAY_CORE);
1685 val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
1686 if (!(val & PORT_PLL_ENABLE))
1689 hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
1690 hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
1692 hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
1693 hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
1695 hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
1696 hw_state->pll0 &= PORT_PLL_M2_MASK;
1698 hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
1699 hw_state->pll1 &= PORT_PLL_N_MASK;
1701 hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
1702 hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
1704 hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
1705 hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
1707 hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
1708 hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
1709 PORT_PLL_INT_COEFF_MASK |
1710 PORT_PLL_GAIN_CTL_MASK;
1712 hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
1713 hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
1715 hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
1716 hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
1718 hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
1719 hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
1720 PORT_PLL_DCO_AMP_MASK;
1723 * While we write to the group register to program all lanes at once we
1724 * can read only lane registers. We configure all lanes the same way, so
1725 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
1727 hw_state->pcsdw12 = intel_de_read(dev_priv,
1728 BXT_PORT_PCS_DW12_LN01(phy, ch));
1729 if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
1730 drm_dbg(&dev_priv->drm,
1731 "lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
1733 intel_de_read(dev_priv,
1734 BXT_PORT_PCS_DW12_LN23(phy, ch)));
1735 hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
1740 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
1745 /* bxt clock parameters */
1746 struct bxt_clk_div {
1758 /* pre-calculated values for DP linkrates */
1759 static const struct bxt_clk_div bxt_dp_clk_val[] = {
1760 {162000, 4, 2, 32, 1677722, 1, 1},
1761 {270000, 4, 1, 27, 0, 0, 1},
1762 {540000, 2, 1, 27, 0, 0, 1},
1763 {216000, 3, 2, 32, 1677722, 1, 1},
1764 {243000, 4, 1, 24, 1258291, 1, 1},
1765 {324000, 4, 1, 32, 1677722, 1, 1},
1766 {432000, 3, 1, 32, 1677722, 1, 1}
1770 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
1771 struct bxt_clk_div *clk_div)
1773 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1774 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1775 struct dpll best_clock;
1777 /* Calculate HDMI div */
1779 * FIXME: tie the following calculation into
1780 * i9xx_crtc_compute_clock
1782 if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
1783 drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
1784 crtc_state->port_clock,
1785 pipe_name(crtc->pipe));
1789 clk_div->p1 = best_clock.p1;
1790 clk_div->p2 = best_clock.p2;
1791 WARN_ON(best_clock.m1 != 2);
1792 clk_div->n = best_clock.n;
1793 clk_div->m2_int = best_clock.m2 >> 22;
1794 clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
1795 clk_div->m2_frac_en = clk_div->m2_frac != 0;
1797 clk_div->vco = best_clock.vco;
1802 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
1803 struct bxt_clk_div *clk_div)
1805 int clock = crtc_state->port_clock;
1808 *clk_div = bxt_dp_clk_val[0];
1809 for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
1810 if (bxt_dp_clk_val[i].clock == clock) {
1811 *clk_div = bxt_dp_clk_val[i];
1816 clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
1819 static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
1820 const struct bxt_clk_div *clk_div)
1822 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
1823 struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
1824 int clock = crtc_state->port_clock;
1825 int vco = clk_div->vco;
1826 u32 prop_coef, int_coef, gain_ctl, targ_cnt;
1829 memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));
1831 if (vco >= 6200000 && vco <= 6700000) {
1836 } else if ((vco > 5400000 && vco < 6200000) ||
1837 (vco >= 4800000 && vco < 5400000)) {
1842 } else if (vco == 5400000) {
1848 drm_err(&i915->drm, "Invalid VCO\n");
1854 else if (clock > 135000)
1856 else if (clock > 67000)
1858 else if (clock > 33000)
1863 dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
1864 dpll_hw_state->pll0 = clk_div->m2_int;
1865 dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
1866 dpll_hw_state->pll2 = clk_div->m2_frac;
1868 if (clk_div->m2_frac_en)
1869 dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
1871 dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
1872 dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
1874 dpll_hw_state->pll8 = targ_cnt;
1876 dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
1878 dpll_hw_state->pll10 =
1879 PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
1880 | PORT_PLL_DCO_AMP_OVR_EN_H;
1882 dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
1884 dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
1890 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1892 struct bxt_clk_div clk_div = {};
1894 bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
1896 return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
1900 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1902 struct bxt_clk_div clk_div = {};
1904 bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
1906 return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
1909 static bool bxt_get_dpll(struct intel_atomic_state *state,
1910 struct intel_crtc *crtc,
1911 struct intel_encoder *encoder)
1913 struct intel_crtc_state *crtc_state =
1914 intel_atomic_get_new_crtc_state(state, crtc);
1915 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1916 struct intel_shared_dpll *pll;
1917 enum intel_dpll_id id;
1919 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
1920 !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
1923 if (intel_crtc_has_dp_encoder(crtc_state) &&
1924 !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
1927 /* 1:1 mapping between ports and PLLs */
1928 id = (enum intel_dpll_id) encoder->port;
1929 pll = intel_get_shared_dpll_by_id(dev_priv, id);
1931 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
1932 crtc->base.base.id, crtc->base.name, pll->info->name);
1934 intel_reference_shared_dpll(state, crtc,
1935 pll, &crtc_state->dpll_hw_state);
1937 crtc_state->shared_dpll = pll;
1942 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
1943 const struct intel_dpll_hw_state *hw_state)
1945 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
1946 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
1947 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
1961 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
1962 .enable = bxt_ddi_pll_enable,
1963 .disable = bxt_ddi_pll_disable,
1964 .get_hw_state = bxt_ddi_pll_get_hw_state,
1967 struct intel_dpll_mgr {
1968 const struct dpll_info *dpll_info;
1970 bool (*get_dplls)(struct intel_atomic_state *state,
1971 struct intel_crtc *crtc,
1972 struct intel_encoder *encoder);
1973 void (*put_dplls)(struct intel_atomic_state *state,
1974 struct intel_crtc *crtc);
1975 void (*update_active_dpll)(struct intel_atomic_state *state,
1976 struct intel_crtc *crtc,
1977 struct intel_encoder *encoder);
1978 void (*dump_hw_state)(struct drm_i915_private *dev_priv,
1979 const struct intel_dpll_hw_state *hw_state);
1982 static const struct dpll_info pch_plls[] = {
1983 { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
1984 { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
1988 static const struct intel_dpll_mgr pch_pll_mgr = {
1989 .dpll_info = pch_plls,
1990 .get_dplls = ibx_get_dpll,
1991 .put_dplls = intel_put_dpll,
1992 .dump_hw_state = ibx_dump_hw_state,
1995 static const struct dpll_info hsw_plls[] = {
1996 { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
1997 { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
1998 { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
1999 { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
2000 { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
2001 { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
2005 static const struct intel_dpll_mgr hsw_pll_mgr = {
2006 .dpll_info = hsw_plls,
2007 .get_dplls = hsw_get_dpll,
2008 .put_dplls = intel_put_dpll,
2009 .dump_hw_state = hsw_dump_hw_state,
2012 static const struct dpll_info skl_plls[] = {
2013 { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
2014 { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2015 { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2016 { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
2020 static const struct intel_dpll_mgr skl_pll_mgr = {
2021 .dpll_info = skl_plls,
2022 .get_dplls = skl_get_dpll,
2023 .put_dplls = intel_put_dpll,
2024 .dump_hw_state = skl_dump_hw_state,
2027 static const struct dpll_info bxt_plls[] = {
2028 { "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2029 { "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2030 { "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2034 static const struct intel_dpll_mgr bxt_pll_mgr = {
2035 .dpll_info = bxt_plls,
2036 .get_dplls = bxt_get_dpll,
2037 .put_dplls = intel_put_dpll,
2038 .dump_hw_state = bxt_dump_hw_state,
2041 static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
2042 struct intel_shared_dpll *pll)
2044 const enum intel_dpll_id id = pll->info->id;
2047 /* 1. Enable DPLL power in DPLL_ENABLE. */
2048 val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2049 val |= PLL_POWER_ENABLE;
2050 intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
2052 /* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
2053 if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
2054 PLL_POWER_STATE, 5))
2055 drm_err(&dev_priv->drm, "PLL %d Power not enabled\n", id);
2058 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
2059 * select DP mode, and set DP link rate.
2061 val = pll->state.hw_state.cfgcr0;
2062 intel_de_write(dev_priv, CNL_DPLL_CFGCR0(id), val);
2064 /* 4. Reab back to ensure writes completed */
2065 intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR0(id));
2067 /* 3. Configure DPLL_CFGCR0 */
2068 /* Avoid touch CFGCR1 if HDMI mode is not enabled */
2069 if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
2070 val = pll->state.hw_state.cfgcr1;
2071 intel_de_write(dev_priv, CNL_DPLL_CFGCR1(id), val);
2072 /* 4. Reab back to ensure writes completed */
2073 intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR1(id));
2077 * 5. If the frequency will result in a change to the voltage
2078 * requirement, follow the Display Voltage Frequency Switching
2079 * Sequence Before Frequency Change
2081 * Note: DVFS is actually handled via the cdclk code paths,
2082 * hence we do nothing here.
2085 /* 6. Enable DPLL in DPLL_ENABLE. */
2086 val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2088 intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
2090 /* 7. Wait for PLL lock status in DPLL_ENABLE. */
2091 if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
2092 drm_err(&dev_priv->drm, "PLL %d not locked\n", id);
2095 * 8. If the frequency will result in a change to the voltage
2096 * requirement, follow the Display Voltage Frequency Switching
2097 * Sequence After Frequency Change
2099 * Note: DVFS is actually handled via the cdclk code paths,
2100 * hence we do nothing here.
2104 * 9. turn on the clock for the DDI and map the DPLL to the DDI
2105 * Done at intel_ddi_clk_select
2109 static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
2110 struct intel_shared_dpll *pll)
2112 const enum intel_dpll_id id = pll->info->id;
2116 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
2117 * Done at intel_ddi_post_disable
2121 * 2. If the frequency will result in a change to the voltage
2122 * requirement, follow the Display Voltage Frequency Switching
2123 * Sequence Before Frequency Change
2125 * Note: DVFS is actually handled via the cdclk code paths,
2126 * hence we do nothing here.
2129 /* 3. Disable DPLL through DPLL_ENABLE. */
2130 val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2132 intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
2134 /* 4. Wait for PLL not locked status in DPLL_ENABLE. */
2135 if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
2136 drm_err(&dev_priv->drm, "PLL %d locked\n", id);
2139 * 5. If the frequency will result in a change to the voltage
2140 * requirement, follow the Display Voltage Frequency Switching
2141 * Sequence After Frequency Change
2143 * Note: DVFS is actually handled via the cdclk code paths,
2144 * hence we do nothing here.
2147 /* 6. Disable DPLL power in DPLL_ENABLE. */
2148 val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2149 val &= ~PLL_POWER_ENABLE;
2150 intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
2152 /* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
2153 if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
2154 PLL_POWER_STATE, 5))
2155 drm_err(&dev_priv->drm, "PLL %d Power not disabled\n", id);
2158 static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2159 struct intel_shared_dpll *pll,
2160 struct intel_dpll_hw_state *hw_state)
2162 const enum intel_dpll_id id = pll->info->id;
2163 intel_wakeref_t wakeref;
2167 wakeref = intel_display_power_get_if_enabled(dev_priv,
2168 POWER_DOMAIN_DISPLAY_CORE);
2174 val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2175 if (!(val & PLL_ENABLE))
2178 val = intel_de_read(dev_priv, CNL_DPLL_CFGCR0(id));
2179 hw_state->cfgcr0 = val;
2181 /* avoid reading back stale values if HDMI mode is not enabled */
2182 if (val & DPLL_CFGCR0_HDMI_MODE) {
2183 hw_state->cfgcr1 = intel_de_read(dev_priv,
2184 CNL_DPLL_CFGCR1(id));
2189 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/*
 * cnl_wrpll_get_multipliers - factor an overall WRPLL divider into P/Q/K
 * @bestdiv: overall divider chosen by the DCO search
 * @pdiv: out: P divider
 * @qdiv: out: Q divider
 * @kdiv: out: K divider
 *
 * The hardware realizes the total post divider as pdiv * qdiv * kdiv and
 * supports only specific factorizations, matching the divider candidates
 * used by the WRPLL DCO search. Outputs are left untouched for dividers
 * outside that candidate set.
 */
static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv % 2 != 0) {
		/* Odd dividers: 3, 5, 7 map directly; 9, 15, 21 use K = 3. */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else { /* 9, 15, 21 */
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* Even dividers. */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (bestdiv % 4 == 0) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (bestdiv % 6 == 0) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (bestdiv % 5 == 0) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (bestdiv % 14 == 0) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2233 static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
2234 u32 dco_freq, u32 ref_freq,
2235 int pdiv, int qdiv, int kdiv)
2250 WARN(1, "Incorrect KDiv\n");
2267 WARN(1, "Incorrect PDiv\n");
2270 WARN_ON(kdiv != 2 && qdiv != 1);
2272 params->qdiv_ratio = qdiv;
2273 params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2275 dco = div_u64((u64)dco_freq << 15, ref_freq);
2277 params->dco_integer = dco >> 15;
2278 params->dco_fraction = dco & 0x7fff;
2281 int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
2283 int ref_clock = dev_priv->cdclk.hw.ref;
2286 * For ICL+, the spec states: if reference frequency is 38.4,
2287 * use 19.2 because the DPLL automatically divides that by 2.
2289 if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
2296 cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2297 struct skl_wrpll_params *wrpll_params)
2299 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2300 u32 afe_clock = crtc_state->port_clock * 5;
2302 u32 dco_min = 7998000;
2303 u32 dco_max = 10000000;
2304 u32 dco_mid = (dco_min + dco_max) / 2;
2305 static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
2306 18, 20, 24, 28, 30, 32, 36, 40,
2307 42, 44, 48, 50, 52, 54, 56, 60,
2308 64, 66, 68, 70, 72, 76, 78, 80,
2309 84, 88, 90, 92, 96, 98, 100, 102,
2310 3, 5, 7, 9, 15, 21 };
2311 u32 dco, best_dco = 0, dco_centrality = 0;
2312 u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2313 int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2315 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2316 dco = afe_clock * dividers[d];
2318 if ((dco <= dco_max) && (dco >= dco_min)) {
2319 dco_centrality = abs(dco - dco_mid);
2321 if (dco_centrality < best_dco_centrality) {
2322 best_dco_centrality = dco_centrality;
2323 best_div = dividers[d];
2332 cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2334 ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
2336 cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2342 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
2345 struct skl_wrpll_params wrpll_params = { 0, };
2347 cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2349 if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
2352 cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2353 wrpll_params.dco_integer;
2355 cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2356 DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2357 DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2358 DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2359 DPLL_CFGCR1_CENTRAL_FREQ;
2361 memset(&crtc_state->dpll_hw_state, 0,
2362 sizeof(crtc_state->dpll_hw_state));
2364 crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2365 crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
2370 cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2374 cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
2376 switch (crtc_state->port_clock / 2) {
2378 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
2381 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
2384 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
2388 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
2391 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
2394 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
2397 /* Some SKUs may require elevated I/O voltage to support this */
2398 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
2401 /* Some SKUs may require elevated I/O voltage to support this */
2402 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
2406 memset(&crtc_state->dpll_hw_state, 0,
2407 sizeof(crtc_state->dpll_hw_state));
2409 crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2414 static bool cnl_get_dpll(struct intel_atomic_state *state,
2415 struct intel_crtc *crtc,
2416 struct intel_encoder *encoder)
2418 struct intel_crtc_state *crtc_state =
2419 intel_atomic_get_new_crtc_state(state, crtc);
2420 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2421 struct intel_shared_dpll *pll;
2424 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
2425 bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
2427 drm_dbg_kms(&i915->drm,
2428 "Could not get HDMI pll dividers.\n");
2431 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
2432 bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
2434 drm_dbg_kms(&i915->drm,
2435 "Could not set DP dpll HW state.\n");
2439 drm_dbg_kms(&i915->drm,
2440 "Skip DPLL setup for output_types 0x%x\n",
2441 crtc_state->output_types);
2445 pll = intel_find_shared_dpll(state, crtc,
2446 &crtc_state->dpll_hw_state,
2447 BIT(DPLL_ID_SKL_DPLL2) |
2448 BIT(DPLL_ID_SKL_DPLL1) |
2449 BIT(DPLL_ID_SKL_DPLL0));
2451 drm_dbg_kms(&i915->drm, "No PLL selected\n");
2455 intel_reference_shared_dpll(state, crtc,
2456 pll, &crtc_state->dpll_hw_state);
2458 crtc_state->shared_dpll = pll;
2463 static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
2464 const struct intel_dpll_hw_state *hw_state)
2466 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
2467 "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
2472 static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
2473 .enable = cnl_ddi_pll_enable,
2474 .disable = cnl_ddi_pll_disable,
2475 .get_hw_state = cnl_ddi_pll_get_hw_state,
2478 static const struct dpll_info cnl_plls[] = {
2479 { "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2480 { "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2481 { "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2485 static const struct intel_dpll_mgr cnl_pll_mgr = {
2486 .dpll_info = cnl_plls,
2487 .get_dplls = cnl_get_dpll,
2488 .put_dplls = intel_put_dpll,
2489 .dump_hw_state = cnl_dump_hw_state,
2492 struct icl_combo_pll_params {
2494 struct skl_wrpll_params wrpll;
2498 * These values alrea already adjusted: they're the bits we write to the
2499 * registers, not the logical values.
2501 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2503 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */
2504 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2506 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */
2507 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2509 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */
2510 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2512 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */
2513 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2515 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */
2516 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2518 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */
2519 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2521 { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */
2522 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2524 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */
2525 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2529 /* Also used for 38.4 MHz values. */
2530 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2532 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */
2533 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2535 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */
2536 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2538 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */
2539 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2541 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */
2542 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2544 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */
2545 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2547 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */
2548 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2550 { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */
2551 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2553 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */
2554 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2557 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2558 .dco_integer = 0x151, .dco_fraction = 0x4000,
2559 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2562 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2563 .dco_integer = 0x1A5, .dco_fraction = 0x7000,
2564 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2567 static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2568 .dco_integer = 0x54, .dco_fraction = 0x3000,
2569 /* the following params are unused */
2570 .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
/* TGL Thunderbolt PLL parameters for a 24 MHz reference clock. */
2573 static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2574 .dco_integer = 0x43, .dco_fraction = 0x4000,
2575 /* the following params are unused */
2576 .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
/*
 * Look up the combo PHY PLL parameters for a DP link rate.
 *
 * Selects the 24 MHz or 19.2 MHz table based on the current reference
 * clock and copies the matching entry's WRPLL parameters into @pll_params.
 * MISSING_CASE() fires when no table entry matches the port clock.
 */
2579 static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2580 struct skl_wrpll_params *pll_params)
2582 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2583 const struct icl_combo_pll_params *params =
2584 dev_priv->cdclk.hw.ref == 24000 ?
2585 icl_dp_combo_pll_24MHz_values :
2586 icl_dp_combo_pll_19_2MHz_values;
2587 int clock = crtc_state->port_clock;
/* Both tables have the same length, so either works for the bound. */
2590 for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2591 if (clock == params[i].clock) {
2592 *pll_params = params[i].wrpll;
2597 MISSING_CASE(clock);
/*
 * Pick the fixed Thunderbolt PLL parameters for the current refclk.
 * Gen12+ uses the TGL tables, earlier platforms the ICL ones;
 * MISSING_CASE() fires on an unexpected reference clock.
 * NOTE(review): the switch case labels are not visible in this extract —
 * confirm the refclk-to-table mapping against the full source.
 */
2601 static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2602 struct skl_wrpll_params *pll_params)
2604 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2606 if (INTEL_GEN(dev_priv) >= 12) {
2607 switch (dev_priv->cdclk.hw.ref) {
2609 MISSING_CASE(dev_priv->cdclk.hw.ref);
2613 *pll_params = tgl_tbt_pll_19_2MHz_values;
2616 *pll_params = tgl_tbt_pll_24MHz_values;
2620 switch (dev_priv->cdclk.hw.ref) {
2622 MISSING_CASE(dev_priv->cdclk.hw.ref);
2626 *pll_params = icl_tbt_pll_19_2MHz_values;
2629 *pll_params = icl_tbt_pll_24MHz_values;
/*
 * Compute the combo PHY / TBT DPLL state for @crtc_state.
 *
 * Type-C PHYs get the fixed TBT PLL parameters, HDMI/DSI get a computed
 * WRPLL, and DP uses the link-rate lookup table.  The chosen parameters
 * are then packed into the cfgcr0/cfgcr1 fields of @pll_state.
 */
2637 static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
2638 struct intel_encoder *encoder,
2639 struct intel_dpll_hw_state *pll_state)
2641 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2643 struct skl_wrpll_params pll_params = { 0 };
2646 if (intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv,
2648 ret = icl_calc_tbt_pll(crtc_state, &pll_params);
2649 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
2650 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
2651 ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
2653 ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
/* Pack the computed dividers into the two DPLL config registers. */
2658 cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params.dco_fraction) |
2659 pll_params.dco_integer;
2661 cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
2662 DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
2663 DPLL_CFGCR1_KDIV(pll_params.kdiv) |
2664 DPLL_CFGCR1_PDIV(pll_params.pdiv);
/* Gen12+ selects the normal XTAL path; older gens the 8400 central freq. */
2666 if (INTEL_GEN(dev_priv) >= 12)
2667 cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
2669 cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
2671 memset(pll_state, 0, sizeof(*pll_state));
2673 pll_state->cfgcr0 = cfgcr0;
2674 pll_state->cfgcr1 = cfgcr1;
2680 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
2682 return id - DPLL_ID_ICL_MGPLL1;
2685 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
2687 return tc_port + DPLL_ID_ICL_MGPLL1;
/*
 * Find HSDIV (div1) and DSDIV (div2) divider values that put the DCO
 * frequency (clock * 5 * div1 * div2) inside the allowed range, and fill
 * in the MG_REFCLKIN_CTL / MG_CLKTOP2_* fields of @state accordingly.
 * For DP the DCO must land exactly on 8.1 GHz.
 */
2690 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2691 u32 *target_dco_khz,
2692 struct intel_dpll_hw_state *state,
2695 u32 dco_min_freq, dco_max_freq;
2696 int div1_vals[] = {7, 5, 3, 2};
2700 dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2701 dco_max_freq = is_dp ? 8100000 : 10000000;
2703 for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2704 int div1 = div1_vals[i];
2706 for (div2 = 10; div2 > 0; div2--) {
2707 int dco = div1 * div2 * clock_khz * 5;
2708 int a_divratio, tlinedrv, inputsel;
2711 if (dco < dco_min_freq || dco > dco_max_freq)
2716 * Note: a_divratio not matching TGL BSpec
2717 * algorithm but matching hardcoded values and
2718 * working on HW for DP alt-mode at least
2720 a_divratio = is_dp ? 10 : 5;
2721 tlinedrv = is_dkl ? 1 : 2;
2726 inputsel = is_dp ? 0 : 1;
/*
 * Translate div1 into the HSDIV register encoding.
 * NOTE(review): the switch(div1) case labels are not visible here.
 */
2733 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2736 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2739 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2742 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2746 *target_dco_khz = dco;
2748 state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2750 state->mg_clktop2_coreclkctl1 =
2751 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2753 state->mg_clktop2_hsclkctl =
2754 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2755 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2757 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
/*
 * Compute the MG (ICL) / DKL (gen12+) PHY PLL state for @crtc_state:
 * feedback dividers (m1/m2), TDC target count, loop-filter coefficients,
 * iref settings and (optional) SSC step values, packed into the register
 * layout that matches the PHY generation.
 */
2767 * The specification for this function uses real numbers, so the math had to be
2768 * adapted to integer-only calculation, that's why it looks so different.
2770 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2771 struct intel_dpll_hw_state *pll_state)
2773 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2774 int refclk_khz = dev_priv->cdclk.hw.ref;
2775 int clock = crtc_state->port_clock;
2776 u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2777 u32 iref_ndiv, iref_trim, iref_pulse_w;
2778 u32 prop_coeff, int_coeff;
2779 u32 tdc_targetcnt, feedfwgain;
2780 u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2782 bool use_ssc = false;
2783 bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2784 bool is_dkl = INTEL_GEN(dev_priv) >= 12;
2786 memset(pll_state, 0, sizeof(*pll_state));
2788 if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2789 pll_state, is_dkl)) {
2790 drm_dbg_kms(&dev_priv->drm,
2791 "Failed to find divisors for clock %d\n", clock);
/*
 * Split the DCO frequency into integer and fractional feedback dividers.
 * NOTE(review): the m1div assignment/retry lines are not visible in this
 * extract; m2div_int is recomputed after m1div is adjusted.
 */
2796 m2div_int = dco_khz / (refclk_khz * m1div);
2797 if (m2div_int > 255) {
2800 m2div_int = dco_khz / (refclk_khz * m1div);
2803 if (m2div_int > 255) {
2804 drm_dbg_kms(&dev_priv->drm,
2805 "Failed to find mdiv for clock %d\n",
2810 m2div_rem = dco_khz % (refclk_khz * m1div);
/* Fractional divider is the remainder scaled to a 22-bit fixed point. */
2812 tmp = (u64)m2div_rem * (1 << 22);
2813 do_div(tmp, refclk_khz * m1div);
/* Per-refclk iref settings; case bodies not visible in this extract. */
2816 switch (refclk_khz) {
2833 MISSING_CASE(refclk_khz);
2838 * tdc_res = 0.000003
2839 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2841 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2842 * was supposed to be a division, but we rearranged the operations of
2843 * the formula to avoid early divisions so we don't multiply the
2846 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2847 * we also rearrange to work with integers.
2849 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2850 * last division by 10.
2852 tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2855 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2856 * 32 bits. That's not a problem since we round the division down
2859 feedfwgain = (use_ssc || m2div_rem > 0) ?
2860 m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
/* Loop-filter coefficients depend on which side of 9 GHz the DCO is on. */
2862 if (dco_khz >= 9000000) {
2871 tmp = mul_u32_u32(dco_khz, 47 * 32);
2872 do_div(tmp, refclk_khz * m1div * 10000);
2875 tmp = mul_u32_u32(dco_khz, 1000);
2876 ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2883 /* write pll_state calculations */
/* DKL (gen12+) register layout. */
2885 pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2886 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2887 DKL_PLL_DIV0_FBPREDIV(m1div) |
2888 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2890 pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2891 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2893 pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2894 DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2895 DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
2896 (use_ssc ? DKL_PLL_SSC_EN : 0);
2898 pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
2899 DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
2901 pll_state->mg_pll_tdc_coldst_bias =
2902 DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
2903 DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
/* MG (ICL) register layout. */
2906 pll_state->mg_pll_div0 =
2907 (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2908 MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2909 MG_PLL_DIV0_FBDIV_INT(m2div_int);
2911 pll_state->mg_pll_div1 =
2912 MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2913 MG_PLL_DIV1_DITHER_DIV_2 |
2914 MG_PLL_DIV1_NDIVRATIO(1) |
2915 MG_PLL_DIV1_FBPREDIV(m1div);
2917 pll_state->mg_pll_lf =
2918 MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2919 MG_PLL_LF_AFCCNTSEL_512 |
2920 MG_PLL_LF_GAINCTRL(1) |
2921 MG_PLL_LF_INT_COEFF(int_coeff) |
2922 MG_PLL_LF_PROP_COEFF(prop_coeff);
2924 pll_state->mg_pll_frac_lock =
2925 MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
2926 MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
2927 MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
2928 MG_PLL_FRAC_LOCK_DCODITHEREN |
2929 MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
2930 if (use_ssc || m2div_rem > 0)
2931 pll_state->mg_pll_frac_lock |=
2932 MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
2934 pll_state->mg_pll_ssc =
2935 (use_ssc ? MG_PLL_SSC_EN : 0) |
2936 MG_PLL_SSC_TYPE(2) |
2937 MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
2938 MG_PLL_SSC_STEPNUM(ssc_steplog) |
2940 MG_PLL_SSC_STEPSIZE(ssc_stepsize);
2942 pll_state->mg_pll_tdc_coldst_bias =
2943 MG_PLL_TDC_COLDST_COLDSTART |
2944 MG_PLL_TDC_COLDST_IREFINT_EN |
2945 MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
2946 MG_PLL_TDC_TDCOVCCORR_EN |
2947 MG_PLL_TDC_TDCSEL(3);
2949 pll_state->mg_pll_bias =
2950 MG_PLL_BIAS_BIAS_GB_SEL(3) |
2951 MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
2952 MG_PLL_BIAS_BIAS_BONUS(10) |
2953 MG_PLL_BIAS_BIASCAL_EN |
2954 MG_PLL_BIAS_CTRIM(12) |
2955 MG_PLL_BIAS_VREF_RDAC(4) |
2956 MG_PLL_BIAS_IREFTRIM(iref_trim);
/*
 * With a 38.4 MHz refclk only the COLDSTART bit of tdc_coldst_bias and
 * none of the bias bits are programmed; record the masks so the write
 * and readout paths can RMW consistently.
 */
2958 if (refclk_khz == 38400) {
2959 pll_state->mg_pll_tdc_coldst_bias_mask =
2960 MG_PLL_TDC_COLDST_COLDSTART;
2961 pll_state->mg_pll_bias_mask = 0;
2963 pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
2964 pll_state->mg_pll_bias_mask = -1U;
2967 pll_state->mg_pll_tdc_coldst_bias &=
2968 pll_state->mg_pll_tdc_coldst_bias_mask;
2969 pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
2976 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
2977 * @crtc_state: state for the CRTC to select the DPLL for
2978 * @port_dpll_id: the active @port_dpll_id to select
2980 * Select the given @port_dpll_id instance from the DPLLs reserved for the
2983 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
2984 enum icl_port_dpll_id port_dpll_id)
2986 struct icl_port_dpll *port_dpll =
2987 &crtc_state->icl_port_dplls[port_dpll_id];
/* Copy the selected reservation into the CRTC's active DPLL slot. */
2989 crtc_state->shared_dpll = port_dpll->pll;
2990 crtc_state->dpll_hw_state = port_dpll->hw_state;
/*
 * Pick the active port DPLL for @crtc based on the Type-C mode of the
 * encoder's (primary) port: DP-alt and legacy modes use the MG PHY PLL,
 * everything else (e.g. TBT) the default PLL.
 */
2993 static void icl_update_active_dpll(struct intel_atomic_state *state,
2994 struct intel_crtc *crtc,
2995 struct intel_encoder *encoder)
2997 struct intel_crtc_state *crtc_state =
2998 intel_atomic_get_new_crtc_state(state, crtc);
2999 struct intel_digital_port *primary_port;
3000 enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
/* For MST the TC mode lives on the primary (physical) port. */
3002 primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3003 enc_to_mst(encoder)->primary :
3004 enc_to_dig_port(encoder);
3007 (primary_port->tc_mode == TC_PORT_DP_ALT ||
3008 primary_port->tc_mode == TC_PORT_LEGACY))
3009 port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3011 icl_set_active_port_dpll(crtc_state, port_dpll_id);
/*
 * Reserve a combo PHY DPLL for @crtc: compute the PLL state, find a
 * matching/free PLL from the platform's combo PLL pool, take a reference
 * on it and make it the CRTC's active DPLL.
 */
3014 static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
3015 struct intel_crtc *crtc,
3016 struct intel_encoder *encoder)
3018 struct intel_crtc_state *crtc_state =
3019 intel_atomic_get_new_crtc_state(state, crtc);
3020 struct icl_port_dpll *port_dpll =
3021 &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3022 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3023 enum port port = encoder->port;
3024 unsigned long dpll_mask;
3026 if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
3027 drm_dbg_kms(&dev_priv->drm,
3028 "Could not calculate combo PHY PLL state.\n");
/* On EHL, non-A ports may additionally use DPLL4. */
3033 if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
3035 BIT(DPLL_ID_EHL_DPLL4) |
3036 BIT(DPLL_ID_ICL_DPLL1) |
3037 BIT(DPLL_ID_ICL_DPLL0);
3039 dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3041 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3042 &port_dpll->hw_state,
3044 if (!port_dpll->pll) {
3045 drm_dbg_kms(&dev_priv->drm,
3046 "No combo PHY PLL found for [ENCODER:%d:%s]\n",
3047 encoder->base.base.id, encoder->base.name);
3051 intel_reference_shared_dpll(state, crtc,
3052 port_dpll->pll, &port_dpll->hw_state);
3054 icl_update_active_dpll(state, crtc, encoder);
/*
 * Reserve both DPLLs a Type-C port may need: the TBT PLL (default) and
 * the per-port MG PHY PLL.  On any failure after the TBT PLL reference
 * is taken, that reference is dropped again (goto cleanup).
 */
3059 static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
3060 struct intel_crtc *crtc,
3061 struct intel_encoder *encoder)
3063 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3064 struct intel_crtc_state *crtc_state =
3065 intel_atomic_get_new_crtc_state(state, crtc);
3066 struct icl_port_dpll *port_dpll;
3067 enum intel_dpll_id dpll_id;
/* First: the TBT PLL, shared by all Type-C ports. */
3069 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3070 if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
3071 drm_dbg_kms(&dev_priv->drm,
3072 "Could not calculate TBT PLL state.\n");
3076 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3077 &port_dpll->hw_state,
3078 BIT(DPLL_ID_ICL_TBTPLL));
3079 if (!port_dpll->pll) {
3080 drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
3083 intel_reference_shared_dpll(state, crtc,
3084 port_dpll->pll, &port_dpll->hw_state);
/* Second: the dedicated MG PHY PLL of this TC port. */
3087 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
3088 if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
3089 drm_dbg_kms(&dev_priv->drm,
3090 "Could not calculate MG PHY PLL state.\n");
3091 goto err_unreference_tbt_pll;
3094 dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
3096 port_dpll->pll = intel_find_shared_dpll(state, crtc,
3097 &port_dpll->hw_state,
3099 if (!port_dpll->pll) {
3100 drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
3101 goto err_unreference_tbt_pll;
3103 intel_reference_shared_dpll(state, crtc,
3104 port_dpll->pll, &port_dpll->hw_state);
3106 icl_update_active_dpll(state, crtc, encoder);
3110 err_unreference_tbt_pll:
3111 port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3112 intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
/*
 * Dispatch DPLL reservation by PHY type: combo PHYs get a single combo
 * PLL, Type-C PHYs get the TBT + MG PLL pair.
 */
3117 static bool icl_get_dplls(struct intel_atomic_state *state,
3118 struct intel_crtc *crtc,
3119 struct intel_encoder *encoder)
3121 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3122 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3124 if (intel_phy_is_combo(dev_priv, phy))
3125 return icl_get_combo_phy_dpll(state, crtc, encoder);
3126 else if (intel_phy_is_tc(dev_priv, phy))
3127 return icl_get_tc_phy_dplls(state, crtc, encoder);
/*
 * Release all port DPLL references held by @crtc's old state and clear
 * the reservations in its new state.
 */
3134 static void icl_put_dplls(struct intel_atomic_state *state,
3135 struct intel_crtc *crtc)
3137 const struct intel_crtc_state *old_crtc_state =
3138 intel_atomic_get_old_crtc_state(state, crtc);
3139 struct intel_crtc_state *new_crtc_state =
3140 intel_atomic_get_new_crtc_state(state, crtc);
3141 enum icl_port_dpll_id id;
3143 new_crtc_state->shared_dpll = NULL;
3145 for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3146 const struct icl_port_dpll *old_port_dpll =
3147 &old_crtc_state->icl_port_dplls[id];
3148 struct icl_port_dpll *new_port_dpll =
3149 &new_crtc_state->icl_port_dplls[id];
3151 new_port_dpll->pll = NULL;
/* Only drop references the old state actually held. */
3153 if (!old_port_dpll->pll)
3156 intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
/*
 * Read the MG PHY PLL state back from hardware into @hw_state, masking
 * each register down to the bits the driver actually programs.  Holds a
 * display-core power reference for the duration; bails out early if the
 * power well cannot be enabled or the PLL is not enabled.
 */
3160 static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
3161 struct intel_shared_dpll *pll,
3162 struct intel_dpll_hw_state *hw_state)
3164 const enum intel_dpll_id id = pll->info->id;
3165 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3166 intel_wakeref_t wakeref;
3170 wakeref = intel_display_power_get_if_enabled(dev_priv,
3171 POWER_DOMAIN_DISPLAY_CORE);
3175 val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
3176 if (!(val & PLL_ENABLE))
3179 hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
3180 MG_REFCLKIN_CTL(tc_port));
3181 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3183 hw_state->mg_clktop2_coreclkctl1 =
3184 intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
3185 hw_state->mg_clktop2_coreclkctl1 &=
3186 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3188 hw_state->mg_clktop2_hsclkctl =
3189 intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
3190 hw_state->mg_clktop2_hsclkctl &=
3191 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3192 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3193 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3194 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3196 hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
3197 hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
3198 hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
3199 hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
3200 MG_PLL_FRAC_LOCK(tc_port));
3201 hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));
3203 hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
3204 hw_state->mg_pll_tdc_coldst_bias =
3205 intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
/* Mirror the mask choice made in icl_calc_mg_pll_state() for 38.4 MHz. */
3207 if (dev_priv->cdclk.hw.ref == 38400) {
3208 hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3209 hw_state->mg_pll_bias_mask = 0;
3211 hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3212 hw_state->mg_pll_bias_mask = -1U;
3215 hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3216 hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3220 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/*
 * Read the Dekel (gen12+) PHY PLL state back from hardware into
 * @hw_state, masking each register down to the programmed bits.  Holds a
 * display-core power reference; bails out early if the power well cannot
 * be enabled or the PLL is not enabled.
 */
3224 static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3225 struct intel_shared_dpll *pll,
3226 struct intel_dpll_hw_state *hw_state)
3228 const enum intel_dpll_id id = pll->info->id;
3229 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
3230 intel_wakeref_t wakeref;
3234 wakeref = intel_display_power_get_if_enabled(dev_priv,
3235 POWER_DOMAIN_DISPLAY_CORE);
3239 val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
3240 if (!(val & PLL_ENABLE))
3244 * All registers read here have the same HIP_INDEX_REG even though
3245 * they are on different building blocks
3247 intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
3248 HIP_INDEX_VAL(tc_port, 0x2));
3250 hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
3251 DKL_REFCLKIN_CTL(tc_port));
3252 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3254 hw_state->mg_clktop2_hsclkctl =
3255 intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
3256 hw_state->mg_clktop2_hsclkctl &=
3257 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3258 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3259 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3260 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
3262 hw_state->mg_clktop2_coreclkctl1 =
3263 intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3264 hw_state->mg_clktop2_coreclkctl1 &=
3265 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3267 hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
3268 hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
3269 DKL_PLL_DIV0_PROP_COEFF_MASK |
3270 DKL_PLL_DIV0_FBPREDIV_MASK |
3271 DKL_PLL_DIV0_FBDIV_INT_MASK);
3273 hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
3274 hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
3275 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3277 hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
3278 hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3279 DKL_PLL_SSC_STEP_LEN_MASK |
3280 DKL_PLL_SSC_STEP_NUM_MASK |
3283 hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
3284 hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
3285 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3287 hw_state->mg_pll_tdc_coldst_bias =
3288 intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3289 hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3290 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3294 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/*
 * Common combo/TBT PLL readout: check @enable_reg for PLL_ENABLE, then
 * read cfgcr0/cfgcr1 from the per-platform register location (TGL, EHL
 * DPLL4, or plain ICL).  Holds a display-core power reference throughout.
 */
3298 static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3299 struct intel_shared_dpll *pll,
3300 struct intel_dpll_hw_state *hw_state,
3301 i915_reg_t enable_reg)
3303 const enum intel_dpll_id id = pll->info->id;
3304 intel_wakeref_t wakeref;
3308 wakeref = intel_display_power_get_if_enabled(dev_priv,
3309 POWER_DOMAIN_DISPLAY_CORE);
3313 val = intel_de_read(dev_priv, enable_reg);
3314 if (!(val & PLL_ENABLE))
3317 if (INTEL_GEN(dev_priv) >= 12) {
3318 hw_state->cfgcr0 = intel_de_read(dev_priv,
3319 TGL_DPLL_CFGCR0(id));
3320 hw_state->cfgcr1 = intel_de_read(dev_priv,
3321 TGL_DPLL_CFGCR1(id));
/* EHL's DPLL4 uses the register slot of (nonexistent) ICL DPLL 4. */
3323 if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3324 hw_state->cfgcr0 = intel_de_read(dev_priv,
3325 ICL_DPLL_CFGCR0(4));
3326 hw_state->cfgcr1 = intel_de_read(dev_priv,
3327 ICL_DPLL_CFGCR1(4));
3329 hw_state->cfgcr0 = intel_de_read(dev_priv,
3330 ICL_DPLL_CFGCR0(id));
3331 hw_state->cfgcr1 = intel_de_read(dev_priv,
3332 ICL_DPLL_CFGCR1(id));
3338 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
/*
 * Combo PLL readout: pick the per-PLL enable register (EHL's DPLL4 is
 * enabled via MG_PLL_ENABLE(0)) and defer to icl_pll_get_hw_state().
 */
3342 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3343 struct intel_shared_dpll *pll,
3344 struct intel_dpll_hw_state *hw_state)
3346 i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3348 if (IS_ELKHARTLAKE(dev_priv) &&
3349 pll->info->id == DPLL_ID_EHL_DPLL4) {
3350 enable_reg = MG_PLL_ENABLE(0);
3353 return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
/* TBT PLL readout: same as combo but via the TBT_PLL_ENABLE register. */
3356 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3357 struct intel_shared_dpll *pll,
3358 struct intel_dpll_hw_state *hw_state)
3360 return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
/*
 * Program cfgcr0/cfgcr1 for a combo/TBT PLL, selecting the per-platform
 * register location (TGL, EHL DPLL4, or plain ICL).  The posting read
 * flushes the writes before the PLL is enabled.
 */
3363 static void icl_dpll_write(struct drm_i915_private *dev_priv,
3364 struct intel_shared_dpll *pll)
3366 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3367 const enum intel_dpll_id id = pll->info->id;
3368 i915_reg_t cfgcr0_reg, cfgcr1_reg;
3370 if (INTEL_GEN(dev_priv) >= 12) {
3371 cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3372 cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3374 if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3375 cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3376 cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3378 cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3379 cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3383 intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
3384 intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
3385 intel_de_posting_read(dev_priv, cfgcr1_reg);
/*
 * Program the MG PHY PLL registers from the precomputed hw state.
 * Registers with reserved fields are written read-modify-write using
 * either fixed masks or the masks recorded by icl_calc_mg_pll_state().
 */
3388 static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
3389 struct intel_shared_dpll *pll)
3391 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3392 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3396 * Some of the following registers have reserved fields, so program
3397 * these with RMW based on a mask. The mask can be fixed or generated
3398 * during the calc/readout phase if the mask depends on some other HW
3399 * state like refclk, see icl_calc_mg_pll_state().
3401 val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
3402 val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3403 val |= hw_state->mg_refclkin_ctl;
3404 intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);
3406 val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
3407 val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3408 val |= hw_state->mg_clktop2_coreclkctl1;
3409 intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);
3411 val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
3412 val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3413 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3414 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3415 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3416 val |= hw_state->mg_clktop2_hsclkctl;
3417 intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);
/* These registers have no reserved fields: plain writes. */
3419 intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3420 intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3421 intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3422 intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
3423 hw_state->mg_pll_frac_lock);
3424 intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
/* Bias/TDC masks were computed per-refclk in icl_calc_mg_pll_state(). */
3426 val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
3427 val &= ~hw_state->mg_pll_bias_mask;
3428 val |= hw_state->mg_pll_bias;
3429 intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);
3431 val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
3432 val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
3433 val |= hw_state->mg_pll_tdc_coldst_bias;
3434 intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);
3436 intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
/*
 * Program the Dekel (gen12+) PHY PLL registers from the precomputed hw
 * state.  The HIP index is selected first; all registers are then
 * written read-modify-write against fixed field masks.
 */
3439 static void dkl_pll_write(struct drm_i915_private *dev_priv,
3440 struct intel_shared_dpll *pll)
3442 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3443 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3447 * All registers programmed here have the same HIP_INDEX_REG even
3448 * though on different building block
3450 intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
3451 HIP_INDEX_VAL(tc_port, 0x2));
3453 /* All the registers are RMW */
3454 val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
3455 val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3456 val |= hw_state->mg_refclkin_ctl;
3457 intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);
3459 val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
3460 val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3461 val |= hw_state->mg_clktop2_coreclkctl1;
3462 intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
3464 val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
3465 val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3466 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3467 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3468 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3469 val |= hw_state->mg_clktop2_hsclkctl;
3470 intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
3472 val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
3473 val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
3474 DKL_PLL_DIV0_PROP_COEFF_MASK |
3475 DKL_PLL_DIV0_FBPREDIV_MASK |
3476 DKL_PLL_DIV0_FBDIV_INT_MASK);
3477 val |= hw_state->mg_pll_div0;
3478 intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);
3480 val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
3481 val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
3482 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
3483 val |= hw_state->mg_pll_div1;
3484 intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);
3486 val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
3487 val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
3488 DKL_PLL_SSC_STEP_LEN_MASK |
3489 DKL_PLL_SSC_STEP_NUM_MASK |
3491 val |= hw_state->mg_pll_ssc;
3492 intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);
3494 val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
3495 val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
3496 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
3497 val |= hw_state->mg_pll_bias;
3498 intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);
3500 val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
3501 val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
3502 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
3503 val |= hw_state->mg_pll_tdc_coldst_bias;
3504 intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
3506 intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
/*
 * Set PLL_POWER_ENABLE in @enable_reg and wait for the power state to
 * report enabled; log an error on timeout.
 */
3509 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3510 struct intel_shared_dpll *pll,
3511 i915_reg_t enable_reg)
3515 val = intel_de_read(dev_priv, enable_reg);
3516 val |= PLL_POWER_ENABLE;
3517 intel_de_write(dev_priv, enable_reg, val);
3520 * The spec says we need to "wait" but it also says it should be
3523 if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3524 drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
/* Enable the PLL via @enable_reg and wait for it to report lock. */
3528 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3529 struct intel_shared_dpll *pll,
3530 i915_reg_t enable_reg)
3534 val = intel_de_read(dev_priv, enable_reg);
3536 intel_de_write(dev_priv, enable_reg, val);
3538 /* Timeout is actually 600us. */
3539 if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
3540 drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
/*
 * Enable a combo PLL: power it up, program cfgcr0/1, then enable and
 * wait for lock.  EHL's DPLL4 additionally requires DC states to be kept
 * off, enforced by holding a DPLL_DC_OFF power reference.
 */
3543 static void combo_pll_enable(struct drm_i915_private *dev_priv,
3544 struct intel_shared_dpll *pll)
3546 i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3548 if (IS_ELKHARTLAKE(dev_priv) &&
3549 pll->info->id == DPLL_ID_EHL_DPLL4) {
3550 enable_reg = MG_PLL_ENABLE(0);
3553 * We need to disable DC states when this DPLL is enabled.
3554 * This can be done by taking a reference on DPLL4 power
3557 pll->wakeref = intel_display_power_get(dev_priv,
3558 POWER_DOMAIN_DPLL_DC_OFF);
3561 icl_pll_power_enable(dev_priv, pll, enable_reg);
3563 icl_dpll_write(dev_priv, pll);
3566 * DVFS pre sequence would be here, but in our driver the cdclk code
3567 * paths should already be setting the appropriate voltage, hence we do
3571 icl_pll_enable(dev_priv, pll, enable_reg);
3573 /* DVFS post sequence would be here. See the comment above. */
/* Enable the TBT PLL: power up, program cfgcr0/1, enable and wait for lock. */
3576 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
3577 struct intel_shared_dpll *pll)
3579 icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
3581 icl_dpll_write(dev_priv, pll);
3584 * DVFS pre sequence would be here, but in our driver the cdclk code
3585 * paths should already be setting the appropriate voltage, hence we do
3589 icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
3591 /* DVFS post sequence would be here. See the comment above. */
/*
 * Enable an MG/DKL PHY PLL: power up, program the PHY registers (Dekel
 * layout on gen12+, MG otherwise), then enable and wait for lock.
 */
3594 static void mg_pll_enable(struct drm_i915_private *dev_priv,
3595 struct intel_shared_dpll *pll)
3597 i915_reg_t enable_reg =
3598 MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3600 icl_pll_power_enable(dev_priv, pll, enable_reg);
3602 if (INTEL_GEN(dev_priv) >= 12)
3603 dkl_pll_write(dev_priv, pll);
3605 icl_mg_pll_write(dev_priv, pll);
3608 * DVFS pre sequence would be here, but in our driver the cdclk code
3609 * paths should already be setting the appropriate voltage, hence we do
3613 icl_pll_enable(dev_priv, pll, enable_reg);
3615 /* DVFS post sequence would be here. See the comment above. */
/*
 * Disable a PLL via @enable_reg: clear the enable bit, wait for lock to
 * drop, then clear power enable and wait for the power state to clear.
 * Timeouts are only logged, not propagated.
 */
3618 static void icl_pll_disable(struct drm_i915_private *dev_priv,
3619 struct intel_shared_dpll *pll,
3620 i915_reg_t enable_reg)
3624 /* The first steps are done by intel_ddi_post_disable(). */
3627 * DVFS pre sequence would be here, but in our driver the cdclk code
3628 * paths should already be setting the appropriate voltage, hence we do
3632 val = intel_de_read(dev_priv, enable_reg);
3634 intel_de_write(dev_priv, enable_reg, val);
3636 /* Timeout is actually 1us. */
3637 if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
3638 drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);
3640 /* DVFS post sequence would be here. See the comment above. */
3642 val = intel_de_read(dev_priv, enable_reg);
3643 val &= ~PLL_POWER_ENABLE;
3644 intel_de_write(dev_priv, enable_reg, val);
3647 * The spec says we need to "wait" but it also says it should be
3650 if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
3651 drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
/*
 * Disable a combo PLL.  For EHL's DPLL4 also release the DPLL_DC_OFF
 * power reference taken at enable time, re-allowing DC states.
 */
3655 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3656 struct intel_shared_dpll *pll)
3658 i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3660 if (IS_ELKHARTLAKE(dev_priv) &&
3661 pll->info->id == DPLL_ID_EHL_DPLL4) {
3662 enable_reg = MG_PLL_ENABLE(0);
3663 icl_pll_disable(dev_priv, pll, enable_reg);
3665 intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
3670 icl_pll_disable(dev_priv, pll, enable_reg);
/* Disable the TBT PLL via its dedicated enable register. */
3673 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
3674 struct intel_shared_dpll *pll)
3676 icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
/* Disable an MG/DKL PLL via its Type-C port's enable register. */
3679 static void mg_pll_disable(struct drm_i915_private *dev_priv,
3680 struct intel_shared_dpll *pll)
3682 i915_reg_t enable_reg =
3683 MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3685 icl_pll_disable(dev_priv, pll, enable_reg);
3688 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3689 const struct intel_dpll_hw_state *hw_state)
3691 drm_dbg_kms(&dev_priv->drm,
3692 "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
3693 "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3694 "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3695 "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3696 "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3697 "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3698 hw_state->cfgcr0, hw_state->cfgcr1,
3699 hw_state->mg_refclkin_ctl,
3700 hw_state->mg_clktop2_coreclkctl1,
3701 hw_state->mg_clktop2_hsclkctl,
3702 hw_state->mg_pll_div0,
3703 hw_state->mg_pll_div1,
3704 hw_state->mg_pll_lf,
3705 hw_state->mg_pll_frac_lock,
3706 hw_state->mg_pll_ssc,
3707 hw_state->mg_pll_bias,
3708 hw_state->mg_pll_tdc_coldst_bias);
/* PLL callbacks for ICL+ combo PHY DPLLs. */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
};
/* PLL callbacks for the Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
};
/* PLL callbacks for ICL MG PHY (Type-C port) PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
};
/*
 * Icelake DPLL table. NOTE(review): intel_shared_dpll_init() WARNs if an
 * entry's table index differs from its DPLL id, so the ordering here is
 * significant. The empty terminator ends the init loop (NULL name).
 */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};
/* Icelake shared-DPLL manager: combo, TBT and MG PLLs. */
static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.dump_hw_state = icl_dump_hw_state,
};
/*
 * Elkhart Lake DPLL table: combo PLLs only, no Type-C/TBT PLLs.
 * Index must match each entry's DPLL id (checked in intel_shared_dpll_init());
 * NOTE(review): DPLL_ID_EHL_DPLL4 presumably equals 2 for that check to hold —
 * verify against the id enum.
 */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};
/*
 * Elkhart Lake shared-DPLL manager. No .update_active_dpll: EHL has no
 * Type-C ports needing an MG/TBT PLL switch.
 */
static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.dump_hw_state = icl_dump_hw_state,
};
/*
 * PLL callbacks for TGL Dekel PHY (Type-C) PLLs. Enable/disable reuse the
 * MG PLL paths; only the hw-state readout differs.
 */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
};
/*
 * Tiger Lake DPLL table: two combo PLLs, the TBT PLL and six Type-C (Dekel)
 * PLLs. Index must match each entry's DPLL id (checked at init time);
 * the empty terminator ends the init loop.
 */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};
/* Tiger Lake shared-DPLL manager; reuses the ICL atomic get/put hooks. */
static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.dump_hw_state = icl_dump_hw_state,
};
/**
 * intel_shared_dpll_init - Initialize shared DPLLs
 * @dev: drm device
 *
 * Initialize shared DPLLs for @dev: pick the platform's DPLL manager,
 * populate dev_priv->dpll.shared_dplls from its table and set up the
 * DPLL mutex.
 */
void intel_shared_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_dpll_mgr *dpll_mgr = NULL;
	const struct dpll_info *dpll_info;
	int i;

	/*
	 * Platform checks are ordered most-specific/newest first: gen12
	 * must be tested before the gen11 catch-all, and EHL before gen11
	 * as well, since it needs its own manager.
	 */
	if (INTEL_GEN(dev_priv) >= 12)
		dpll_mgr = &tgl_pll_mgr;
	else if (IS_ELKHARTLAKE(dev_priv))
		dpll_mgr = &ehl_pll_mgr;
	else if (INTEL_GEN(dev_priv) >= 11)
		dpll_mgr = &icl_pll_mgr;
	else if (IS_CANNONLAKE(dev_priv))
		dpll_mgr = &cnl_pll_mgr;
	else if (IS_GEN9_BC(dev_priv))
		dpll_mgr = &skl_pll_mgr;
	else if (IS_GEN9_LP(dev_priv))
		dpll_mgr = &bxt_pll_mgr;
	else if (HAS_DDI(dev_priv))
		dpll_mgr = &hsw_pll_mgr;
	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		dpll_mgr = &pch_pll_mgr;

	/* Platforms without shared DPLLs (e.g. pre-ILK) get an empty setup. */
	if (!dpll_mgr) {
		dev_priv->dpll.num_shared_dpll = 0;
		return;
	}

	dpll_info = dpll_mgr->dpll_info;

	/* Tables are NULL-name terminated; index must equal the DPLL id. */
	for (i = 0; dpll_info[i].name; i++) {
		drm_WARN_ON(dev, i != dpll_info[i].id);
		dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
	}

	dev_priv->dpll.mgr = dpll_mgr;
	dev_priv->dpll.num_shared_dpll = i;
	mutex_init(&dev_priv->dpll.lock);

	BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
}
/**
 * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
 * @state: atomic state
 * @crtc: CRTC to reserve DPLLs for
 * @encoder: encoder determining the type of port DPLL
 *
 * This function reserves all required DPLLs for the given CRTC and encoder
 * combination in the current atomic commit @state and the new @crtc atomic
 * state.
 *
 * The new configuration in the atomic commit @state is made effective by
 * calling intel_shared_dpll_swap_state().
 *
 * The reserved DPLLs should be released by calling
 * intel_release_shared_dplls().
 *
 * Returns:
 * True if all required DPLLs were successfully reserved.
 */
bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
				struct intel_crtc *crtc,
				struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;

	/* Callers must not reach here on platforms without a DPLL manager. */
	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
		return false;

	return dpll_mgr->get_dplls(state, crtc, encoder);
}
/**
 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
 * @state: atomic state
 * @crtc: crtc from which the DPLLs are to be released
 *
 * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
 * from the current atomic commit @state and the old @crtc atomic state.
 *
 * The new configuration in the atomic commit @state is made effective by
 * calling intel_shared_dpll_swap_state().
 */
void intel_release_shared_dplls(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;

	/*
	 * FIXME: this function is called for every platform having a
	 * compute_clock hook, even though the platform doesn't yet support
	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
	 * called on those.
	 */
	if (dpll_mgr)
		dpll_mgr->put_dplls(state, crtc);
}
/**
 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
 * @state: atomic state
 * @crtc: the CRTC for which to update the active DPLL
 * @encoder: encoder determining the type of port DPLL
 *
 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
 * DPLL selected will be based on the current mode of the encoder's port.
 */
void intel_update_active_dpll(struct intel_atomic_state *state,
			      struct intel_crtc *crtc,
			      struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;

	/* Only platforms providing .update_active_dpll should get here. */
	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
		return;

	dpll_mgr->update_active_dpll(state, crtc, encoder);
}
/*
 * Read back one PLL's hardware state at driver load/resume: its on/off
 * state, its register values and the mask of CRTCs currently using it.
 */
static void readout_dpll_hw_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = pll->info->funcs->get_hw_state(i915, pll,
						 &pll->state.hw_state);

	/*
	 * EHL DPLL4 holds a DC-off power reference while enabled (see
	 * combo_pll_disable() dropping it); take it here if the BIOS left
	 * the PLL on, so enable/disable refcounting stays balanced.
	 */
	if (IS_ELKHARTLAKE(i915) && pll->on &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		pll->wakeref = intel_display_power_get(i915,
						       POWER_DOMAIN_DPLL_DC_OFF);
	}

	/* Rebuild the user mask from the active CRTCs pointing at this PLL. */
	pll->state.crtc_mask = 0;
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			pll->state.crtc_mask |= 1 << crtc->pipe;
	}
	pll->active_mask = pll->state.crtc_mask;

	drm_dbg_kms(&i915->drm,
		    "%s hw state readout: crtc_mask 0x%08x, on %i\n",
		    pll->info->name, pll->state.crtc_mask, pll->on);
}
3951 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
3955 for (i = 0; i < i915->dpll.num_shared_dpll; i++)
3956 readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
/*
 * Disable a PLL that hardware readout found enabled but which no active
 * CRTC is using (e.g. left on by the BIOS), keeping software and hardware
 * state consistent.
 */
static void sanitize_dpll_state(struct drm_i915_private *i915,
				struct intel_shared_dpll *pll)
{
	/* Nothing to do if the PLL is off, or if it is genuinely in use. */
	if (!pll->on || pll->active_mask)
		return;

	drm_dbg_kms(&i915->drm,
		    "%s enabled but not in use, disabling\n",
		    pll->info->name);

	pll->info->funcs->disable(i915, pll);
	pll->on = false;
}
3973 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
3977 for (i = 0; i < i915->dpll.num_shared_dpll; i++)
3978 sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
3982 * intel_shared_dpll_dump_hw_state - write hw_state to dmesg
3983 * @dev_priv: i915 drm device
3984 * @hw_state: hw state to be written to the log
3986 * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
3988 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
3989 const struct intel_dpll_hw_state *hw_state)
3991 if (dev_priv->dpll.mgr) {
3992 dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
3994 /* fallback for platforms that don't use the shared dpll
3997 drm_dbg_kms(&dev_priv->drm,
3998 "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
3999 "fp0: 0x%x, fp1: 0x%x\n",