2 * Copyright © 2006-2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 #include "intel_drv.h"
29 * Display PLLs used for driving outputs vary by platform. While some have
30 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
31 * from a pool. In the latter scenario, it is possible that multiple pipes
32 * share a PLL if their configurations match.
34 * This file provides an abstraction over display PLLs. The function
35 * intel_shared_dpll_init() initializes the PLLs for the given platform. The
36 * users of a PLL are tracked and that tracking is integrated with the atomic
37 * modeset interface. During an atomic operation, a PLL can be requested for a
38 * given CRTC and encoder configuration by calling intel_get_shared_dpll() and
39 * a previously used PLL can be released with intel_release_shared_dpll().
40 * Changes to the users are first staged in the atomic state, and then made
41 * effective by calling intel_shared_dpll_swap_state() during the atomic
/*
 * Snapshot the live state of every shared DPLL into the caller-provided
 * array (one entry per PLL), so an atomic commit can stage changes
 * without touching the hardware-tracking state.
 */
46 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
47 struct intel_shared_dpll_state *shared_dpll)
51 /* Copy shared dpll state */
52 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
53 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
55 shared_dpll[i] = pll->state;
/*
 * Return the staged per-atomic-state copy of the shared DPLL state,
 * duplicating the live state lazily on first use. Caller must hold
 * connection_mutex (asserted below).
 */
59 static struct intel_shared_dpll_state *
60 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
62 struct intel_atomic_state *state = to_intel_atomic_state(s);
64 WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
66 if (!state->dpll_set) {
67 state->dpll_set = true;
/* First use in this atomic state: snapshot the current PLL state. */
69 intel_atomic_duplicate_dpll_state(to_i915(s->dev),
73 return state->shared_dpll;
77 * intel_get_shared_dpll_by_id - get a DPLL given its id
78 * @dev_priv: i915 device instance
82 * A pointer to the DPLL with @id
/* Look up a shared DPLL by directly indexing the per-device array. */
84 struct intel_shared_dpll *
85 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
86 enum intel_dpll_id id)
88 return &dev_priv->shared_dplls[id];
92 * intel_get_shared_dpll_id - get the id of a DPLL
93 * @dev_priv: i915 device instance
/*
 * Recover the DPLL id from the pointer's offset within shared_dplls[].
 * NOTE(review): the upper bound compares with '>' against
 * &shared_dplls[num_shared_dpll] (one past the end), so a pointer
 * exactly one past the array escapes the WARN -- looks like an
 * off-by-one; confirm intended.
 */
100 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
101 struct intel_shared_dpll *pll)
103 if (WARN_ON(pll < dev_priv->shared_dplls||
104 pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
107 return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
/*
 * Warn if the PLL's actual hardware enable state (read via its
 * get_hw_state hook) does not match the expected @state.
 */
111 void assert_shared_dpll(struct drm_i915_private *dev_priv,
112 struct intel_shared_dpll *pll,
116 struct intel_dpll_hw_state hw_state;
118 if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
121 cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
122 I915_STATE_WARN(cur_state != state,
123 "%s assertion failure (expected %s, current %s)\n",
124 pll->info->name, onoff(state), onoff(cur_state));
128 * intel_prepare_shared_dpll - call a dpll's prepare hook
129 * @crtc_state: CRTC, and its state, which has a shared dpll
131 * This calls the PLL's prepare hook if it has one and if the PLL is not
132 * already enabled. The prepare hook is platform specific.
134 void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
136 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
137 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
138 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
140 if (WARN_ON(pll == NULL))
/* dpll_lock serializes access to the shared PLL bookkeeping. */
143 mutex_lock(&dev_priv->dpll_lock);
144 WARN_ON(!pll->state.crtc_mask);
/* Only prepare while no CRTC is actively using the PLL yet. */
145 if (!pll->active_mask) {
146 DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name);
148 assert_shared_dpll_disabled(dev_priv, pll);
150 pll->info->funcs->prepare(dev_priv, pll);
152 mutex_unlock(&dev_priv->dpll_lock);
156 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
157 * @crtc_state: CRTC, and its state, which has a shared DPLL
159 * Enable the shared DPLL used by @crtc.
161 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
163 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
164 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
165 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
166 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
167 unsigned int old_mask;
169 if (WARN_ON(pll == NULL))
172 mutex_lock(&dev_priv->dpll_lock);
/* Remember whether another CRTC already had the PLL running. */
173 old_mask = pll->active_mask;
/* CRTC must be a staged user and must not already be active on it. */
175 if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
176 WARN_ON(pll->active_mask & crtc_mask))
179 pll->active_mask |= crtc_mask;
181 DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
182 pll->info->name, pll->active_mask, pll->on,
/* PLL already running for another CRTC: just sanity-check hw. */
187 assert_shared_dpll_enabled(dev_priv, pll);
192 DRM_DEBUG_KMS("enabling %s\n", pll->info->name);
193 pll->info->funcs->enable(dev_priv, pll);
197 mutex_unlock(&dev_priv->dpll_lock);
201 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
202 * @crtc_state: CRTC, and its state, which has a shared DPLL
204 * Disable the shared DPLL used by @crtc.
206 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
208 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
209 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
210 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
211 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
213 /* PCH only available on ILK+ */
214 if (INTEL_GEN(dev_priv) < 5)
220 mutex_lock(&dev_priv->dpll_lock);
221 if (WARN_ON(!(pll->active_mask & crtc_mask)))
224 DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
225 pll->info->name, pll->active_mask, pll->on,
228 assert_shared_dpll_enabled(dev_priv, pll);
231 pll->active_mask &= ~crtc_mask;
/* Other CRTCs still use this PLL: keep it running. */
232 if (pll->active_mask)
235 DRM_DEBUG_KMS("disabling %s\n", pll->info->name);
236 pll->info->funcs->disable(dev_priv, pll);
240 mutex_unlock(&dev_priv->dpll_lock);
/*
 * Find a usable PLL in [range_min, range_max] for @crtc_state: prefer
 * an in-use PLL whose staged hw state matches exactly (sharing),
 * otherwise fall back to an unused one.
 */
243 static struct intel_shared_dpll *
244 intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
245 enum intel_dpll_id range_min,
246 enum intel_dpll_id range_max)
248 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
249 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
250 struct intel_shared_dpll *pll, *unused_pll = NULL;
251 struct intel_shared_dpll_state *shared_dpll;
252 enum intel_dpll_id i;
254 shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
256 for (i = range_min; i <= range_max; i++) {
257 pll = &dev_priv->shared_dplls[i];
259 /* Only want to check enabled timings first */
260 if (shared_dpll[i].crtc_mask == 0) {
/* Byte-wise compare of the full staged hw state for sharing. */
266 if (memcmp(&crtc_state->dpll_hw_state,
267 &shared_dpll[i].hw_state,
268 sizeof(crtc_state->dpll_hw_state)) == 0) {
269 DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
270 crtc->base.base.id, crtc->base.name,
272 shared_dpll[i].crtc_mask,
278 /* Ok no matching timings, maybe there's a free one? */
280 DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
281 crtc->base.base.id, crtc->base.name,
282 unused_pll->info->name);
/*
 * Stage @crtc_state's pipe as a user of @pll in the atomic state.
 * The first user also records the hw state the PLL must be
 * programmed with.
 */
290 intel_reference_shared_dpll(struct intel_shared_dpll *pll,
291 struct intel_crtc_state *crtc_state)
293 struct intel_shared_dpll_state *shared_dpll;
294 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
295 const enum intel_dpll_id id = pll->info->id;
297 shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
/* First user defines the PLL's programmed hw state. */
299 if (shared_dpll[id].crtc_mask == 0)
300 shared_dpll[id].hw_state =
301 crtc_state->dpll_hw_state;
303 crtc_state->shared_dpll = pll;
304 DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
305 pipe_name(crtc->pipe));
307 shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
311 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
312 * @state: atomic state
314 * This is the dpll version of drm_atomic_helper_swap_state() since the
315 * helper does not handle driver-specific global state.
317 * For consistency with atomic helpers this function does a complete swap,
318 * i.e. it also puts the current state into @state, even though there is no
319 * need for that at this moment.
321 void intel_shared_dpll_swap_state(struct drm_atomic_state *state)
323 struct drm_i915_private *dev_priv = to_i915(state->dev);
324 struct intel_shared_dpll_state *shared_dpll;
325 struct intel_shared_dpll *pll;
326 enum intel_dpll_id i;
/* Nothing staged in this atomic state: nothing to swap. */
328 if (!to_intel_atomic_state(state)->dpll_set)
331 shared_dpll = to_intel_atomic_state(state)->shared_dpll;
/* Swap staged and live state for every PLL (full swap, see kerneldoc). */
332 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
333 struct intel_shared_dpll_state tmp;
335 pll = &dev_priv->shared_dplls[i];
338 pll->state = shared_dpll[i];
339 shared_dpll[i] = tmp;
/*
 * Read back the PCH DPLL/FP0/FP1 registers into @hw_state.
 * Returns true when the VCO enable bit is set; bails out (not visible
 * in this chunk) if the PLLS power domain cannot be acquired.
 */
343 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
344 struct intel_shared_dpll *pll,
345 struct intel_dpll_hw_state *hw_state)
347 const enum intel_dpll_id id = pll->info->id;
348 intel_wakeref_t wakeref;
/* Only touch the registers if the power well is already up. */
351 wakeref = intel_display_power_get_if_enabled(dev_priv,
356 val = I915_READ(PCH_DPLL(id));
357 hw_state->dpll = val;
358 hw_state->fp0 = I915_READ(PCH_FP0(id));
359 hw_state->fp1 = I915_READ(PCH_FP1(id));
361 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
363 return val & DPLL_VCO_ENABLE;
/* Program the FP0/FP1 divisor registers ahead of enabling the DPLL. */
366 static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
367 struct intel_shared_dpll *pll)
369 const enum intel_dpll_id id = pll->info->id;
371 I915_WRITE(PCH_FP0(id), pll->state.hw_state.fp0);
372 I915_WRITE(PCH_FP1(id), pll->state.hw_state.fp1);
/*
 * Warn unless some PCH reference clock source (SSC, non-spread or
 * superspread) is enabled in PCH_DREF_CONTROL. Only meaningful on
 * IBX/CPT PCHs (asserted first).
 */
375 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
380 I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
382 val = I915_READ(PCH_DREF_CONTROL);
383 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
384 DREF_SUPERSPREAD_SOURCE_MASK));
385 I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
/* Enable the PCH DPLL and rewrite it once stable (pixel multiplier). */
388 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
389 struct intel_shared_dpll *pll)
391 const enum intel_dpll_id id = pll->info->id;
393 /* PCH refclock must be enabled first */
394 ibx_assert_pch_refclk_enabled(dev_priv);
396 I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
398 /* Wait for the clocks to stabilize. */
399 POSTING_READ(PCH_DPLL(id));
402 /* The pixel multiplier can only be updated once the
403 * DPLL is enabled and the clocks are stable.
407 I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
408 POSTING_READ(PCH_DPLL(id));
/* Disable the PCH DPLL by clearing its control register entirely. */
412 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
413 struct intel_shared_dpll *pll)
415 const enum intel_dpll_id id = pll->info->id;
417 I915_WRITE(PCH_DPLL(id), 0);
418 POSTING_READ(PCH_DPLL(id));
/*
 * Pick a PCH DPLL for the CRTC: IBX has a fixed pipe->PLL mapping,
 * other PCHs search the shared pool; the chosen PLL is then
 * referenced in the atomic state.
 */
422 static struct intel_shared_dpll *
423 ibx_get_dpll(struct intel_crtc_state *crtc_state,
424 struct intel_encoder *encoder)
426 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
427 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
428 struct intel_shared_dpll *pll;
429 enum intel_dpll_id i;
431 if (HAS_PCH_IBX(dev_priv)) {
432 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
433 i = (enum intel_dpll_id) crtc->pipe;
434 pll = &dev_priv->shared_dplls[i];
436 DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
437 crtc->base.base.id, crtc->base.name,
440 pll = intel_find_shared_dpll(crtc_state,
448 /* reference the pll */
449 intel_reference_shared_dpll(pll, crtc_state);
/* Debug-log the IBX PCH DPLL hw state fields. */
454 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
455 struct intel_dpll_hw_state *hw_state)
457 DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
458 "fp0: 0x%x, fp1: 0x%x\n",
/* IBX PCH DPLL vtable wired into the shared-DPLL framework. */
465 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
466 .prepare = ibx_pch_dpll_prepare,
467 .enable = ibx_pch_dpll_enable,
468 .disable = ibx_pch_dpll_disable,
469 .get_hw_state = ibx_pch_dpll_get_hw_state,
/* Write the staged WRPLL control value (includes the enable bit). */
472 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
473 struct intel_shared_dpll *pll)
475 const enum intel_dpll_id id = pll->info->id;
477 I915_WRITE(WRPLL_CTL(id), pll->state.hw_state.wrpll);
478 POSTING_READ(WRPLL_CTL(id));
/* Write the staged SPLL control value (single SPLL, no id needed). */
482 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
483 struct intel_shared_dpll *pll)
485 I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
486 POSTING_READ(SPLL_CTL);
/* Clear only the WRPLL enable bit, preserving other control bits. */
490 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
491 struct intel_shared_dpll *pll)
493 const enum intel_dpll_id id = pll->info->id;
496 val = I915_READ(WRPLL_CTL(id));
497 I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
498 POSTING_READ(WRPLL_CTL(id));
/* Clear only the SPLL enable bit, preserving other control bits. */
501 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
502 struct intel_shared_dpll *pll)
506 val = I915_READ(SPLL_CTL);
507 I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
508 POSTING_READ(SPLL_CTL);
/*
 * Read WRPLL_CTL into @hw_state under a PLLS power-domain wakeref;
 * returns true when the PLL enable bit is set.
 */
511 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
512 struct intel_shared_dpll *pll,
513 struct intel_dpll_hw_state *hw_state)
515 const enum intel_dpll_id id = pll->info->id;
516 intel_wakeref_t wakeref;
519 wakeref = intel_display_power_get_if_enabled(dev_priv,
524 val = I915_READ(WRPLL_CTL(id));
525 hw_state->wrpll = val;
527 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
529 return val & WRPLL_PLL_ENABLE;
/*
 * Read SPLL_CTL into @hw_state under a PLLS power-domain wakeref;
 * returns true when the PLL enable bit is set.
 */
532 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
533 struct intel_shared_dpll *pll,
534 struct intel_dpll_hw_state *hw_state)
536 intel_wakeref_t wakeref;
539 wakeref = intel_display_power_get_if_enabled(dev_priv,
544 val = I915_READ(SPLL_CTL);
545 hw_state->spll = val;
547 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
549 return val & SPLL_PLL_ENABLE;
553 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
559 /* Constraints for PLL good behavior */
565 struct hsw_wrpll_rnp {
569 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
/*
 * Compare candidate divider set (r2, n2, p) against the current *best,
 * using the fixed-point PPM-budget arithmetic described below, and
 * update *best in place when the candidate wins.
 */
643 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
644 unsigned int r2, unsigned int n2,
646 struct hsw_wrpll_rnp *best)
648 u64 a, b, c, d, diff, diff_best;
650 /* No best (r,n,p) yet */
659 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
663 * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
666 * and we would like delta <= budget.
668 * If the discrepancy is above the PPM-based budget, always prefer to
669 * improve upon the previous solution. However, if you're within the
670 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
672 a = freq2k * budget * p * r2;
673 b = freq2k * budget * best->p * best->r2;
674 diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
675 diff_best = abs_diff(freq2k * best->p * best->r2,
676 LC_FREQ_2K * best->n2);
678 d = 1000000 * diff_best;
680 if (a < c && b < d) {
681 /* If both are above the budget, pick the closer */
682 if (best->p * best->r2 * diff < p * r2 * diff_best) {
687 } else if (a >= c && b < d) {
688 /* If A is below the threshold but B is above it? Update. */
692 } else if (a >= c && b >= d) {
693 /* Both are below the limit, so pick the higher n2/(r2*r2) */
694 if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
700 /* Otherwise a < c && b >= d, do nothing */
/*
 * Exhaustively search valid (r2, n2, p) WRPLL divider triples for
 * @clock, keeping the best candidate per hsw_wrpll_update_rnp().
 * 540 MHz pixel clock is special-cased to bypass the WRPLL.
 */
704 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
705 unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
709 struct hsw_wrpll_rnp best = { 0, 0, 0 };
/* Work in units of 100 Hz ("freq2k" per the comments below). */
712 freq2k = clock / 100;
714 budget = hsw_wrpll_get_budget_for_freq(clock);
716 /* Special case handling for 540 pixel clock: bypass WR PLL entirely
717 * and directly pass the LC PLL to it. */
718 if (freq2k == 5400000) {
726 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
729 * We want R so that REF_MIN <= Ref <= REF_MAX.
730 * Injecting R2 = 2 * R gives:
731 * REF_MAX * r2 > LC_FREQ * 2 and
732 * REF_MIN * r2 < LC_FREQ * 2
734 * Which means the desired boundaries for r2 are:
735 * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
738 for (r2 = LC_FREQ * 2 / REF_MAX + 1;
739 r2 <= LC_FREQ * 2 / REF_MIN;
743 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
745 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
746 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
747 * VCO_MAX * r2 > n2 * LC_FREQ and
748 * VCO_MIN * r2 < n2 * LC_FREQ)
750 * Which means the desired boundaries for n2 are:
751 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
753 for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
754 n2 <= VCO_MAX * r2 / LC_FREQ;
757 for (p = P_MIN; p <= P_MAX; p += P_INC)
758 hsw_wrpll_update_rnp(freq2k, budget,
/*
 * Compute WRPLL dividers for the HDMI port clock, stage the resulting
 * control value, then grab one of the two WRPLLs from the pool.
 */
768 static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *crtc_state)
770 struct intel_shared_dpll *pll;
772 unsigned int p, n2, r2;
/* port_clock is in kHz; the calculator wants Hz. */
774 hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
776 val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
777 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
778 WRPLL_DIVIDER_POST(p);
780 crtc_state->dpll_hw_state.wrpll = val;
782 pll = intel_find_shared_dpll(crtc_state,
783 DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
/*
 * Map the DP port clock to one of the fixed LCPLL-derived link
 * clocks; unsupported clocks are rejected with a debug message.
 */
791 static struct intel_shared_dpll *
792 hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
794 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
795 struct intel_shared_dpll *pll;
796 enum intel_dpll_id pll_id;
797 int clock = crtc_state->port_clock;
801 pll_id = DPLL_ID_LCPLL_810;
804 pll_id = DPLL_ID_LCPLL_1350;
807 pll_id = DPLL_ID_LCPLL_2700;
810 DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
814 pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
/*
 * HSW PLL selection: WRPLL for HDMI, LCPLL for DP, SPLL for analog
 * (which must run at 135 MHz x2). The chosen PLL is referenced in
 * the atomic state.
 */
822 static struct intel_shared_dpll *
823 hsw_get_dpll(struct intel_crtc_state *crtc_state,
824 struct intel_encoder *encoder)
826 struct intel_shared_dpll *pll;
/* Start from a clean hw state; only relevant fields get filled in. */
828 memset(&crtc_state->dpll_hw_state, 0,
829 sizeof(crtc_state->dpll_hw_state));
831 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
832 pll = hsw_ddi_hdmi_get_dpll(crtc_state);
833 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
834 pll = hsw_ddi_dp_get_dpll(crtc_state);
835 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
836 if (WARN_ON(crtc_state->port_clock / 2 != 135000))
839 crtc_state->dpll_hw_state.spll =
840 SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
842 pll = intel_find_shared_dpll(crtc_state,
843 DPLL_ID_SPLL, DPLL_ID_SPLL);
851 intel_reference_shared_dpll(pll, crtc_state);
/* Debug-log the HSW WRPLL/SPLL hw state fields. */
856 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
857 struct intel_dpll_hw_state *hw_state)
859 DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
860 hw_state->wrpll, hw_state->spll);
/* HSW WRPLL vtable (no prepare step needed). */
863 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
864 .enable = hsw_ddi_wrpll_enable,
865 .disable = hsw_ddi_wrpll_disable,
866 .get_hw_state = hsw_ddi_wrpll_get_hw_state,
/* HSW SPLL vtable (no prepare step needed). */
869 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
870 .enable = hsw_ddi_spll_enable,
871 .disable = hsw_ddi_spll_disable,
872 .get_hw_state = hsw_ddi_spll_get_hw_state,
/*
 * LCPLL enable hook. NOTE(review): body not visible in this chunk;
 * the LCPLL appears to be managed elsewhere, making this a no-op --
 * confirm against the full file.
 */
875 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
876 struct intel_shared_dpll *pll)
/* LCPLL disable hook; see note on hsw_ddi_lcpll_enable above. */
880 static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
881 struct intel_shared_dpll *pll)
/* LCPLL get_hw_state hook (body not visible in this chunk). */
885 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
886 struct intel_shared_dpll *pll,
887 struct intel_dpll_hw_state *hw_state)
/* HSW LCPLL vtable. */
892 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
893 .enable = hsw_ddi_lcpll_enable,
894 .disable = hsw_ddi_lcpll_disable,
895 .get_hw_state = hsw_ddi_lcpll_get_hw_state,
/* Per-PLL register set for SKL: control plus two config registers. */
898 struct skl_dpll_regs {
899 i915_reg_t ctl, cfgcr1, cfgcr2;
902 /* this array is indexed by the *shared* pll id */
903 static const struct skl_dpll_regs skl_dpll_regs[4] = {
907 /* DPLL 0 doesn't support HDMI mode */
912 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
913 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
918 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
919 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
924 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
925 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
/*
 * Update this PLL's field in the shared DPLL_CTRL1 register.
 * Each PLL owns a 6-bit slot at bit position (id * 6), as the shift
 * below shows.
 */
929 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
930 struct intel_shared_dpll *pll)
932 const enum intel_dpll_id id = pll->info->id;
935 val = I915_READ(DPLL_CTRL1);
937 val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
939 DPLL_CTRL1_LINK_RATE_MASK(id));
940 val |= pll->state.hw_state.ctrl1 << (id * 6);
942 I915_WRITE(DPLL_CTRL1, val);
943 POSTING_READ(DPLL_CTRL1);
/*
 * Program CTRL1 and the per-PLL cfgcr registers, set the enable bit,
 * then wait for the PLL to report lock (error on timeout).
 */
946 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
947 struct intel_shared_dpll *pll)
949 const struct skl_dpll_regs *regs = skl_dpll_regs;
950 const enum intel_dpll_id id = pll->info->id;
952 skl_ddi_pll_write_ctrl1(dev_priv, pll);
954 I915_WRITE(regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
955 I915_WRITE(regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
956 POSTING_READ(regs[id].cfgcr1);
957 POSTING_READ(regs[id].cfgcr2);
959 /* the enable bit is always bit 31 */
960 I915_WRITE(regs[id].ctl,
961 I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
963 if (intel_wait_for_register(dev_priv,
968 DRM_ERROR("DPLL %d not locked\n", id);
/*
 * DPLL0 enable only programs its CTRL1 bits here; the PLL itself is
 * presumably brought up by the CDCLK code (see the "drives CDCLK"
 * comment in skl_ddi_dpll0_get_hw_state) -- confirm.
 */
971 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
972 struct intel_shared_dpll *pll)
974 skl_ddi_pll_write_ctrl1(dev_priv, pll);
/* Clear the enable bit (bit 31) in the PLL's control register. */
977 static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
978 struct intel_shared_dpll *pll)
980 const struct skl_dpll_regs *regs = skl_dpll_regs;
981 const enum intel_dpll_id id = pll->info->id;
983 /* the enable bit is always bit 31 */
984 I915_WRITE(regs[id].ctl,
985 I915_READ(regs[id].ctl) & ~LCPLL_PLL_ENABLE);
986 POSTING_READ(regs[id].ctl);
/*
 * DPLL0 disable hook. NOTE(review): body not visible in this chunk;
 * DPLL0 drives CDCLK (see skl_ddi_dpll0_get_hw_state), so a no-op
 * here would be consistent -- confirm against the full file.
 */
989 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
990 struct intel_shared_dpll *pll)
/*
 * Read back CTRL1 (this PLL's 6-bit slot) and, in HDMI mode only,
 * the cfgcr registers. Requires the PLLS power domain; returns the
 * PLL's enabled status.
 */
994 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
995 struct intel_shared_dpll *pll,
996 struct intel_dpll_hw_state *hw_state)
999 const struct skl_dpll_regs *regs = skl_dpll_regs;
1000 const enum intel_dpll_id id = pll->info->id;
1001 intel_wakeref_t wakeref;
1004 wakeref = intel_display_power_get_if_enabled(dev_priv,
1011 val = I915_READ(regs[id].ctl);
1012 if (!(val & LCPLL_PLL_ENABLE))
1015 val = I915_READ(DPLL_CTRL1);
1016 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1018 /* avoid reading back stale values if HDMI mode is not enabled */
1019 if (val & DPLL_CTRL1_HDMI_MODE(id)) {
1020 hw_state->cfgcr1 = I915_READ(regs[id].cfgcr1);
1021 hw_state->cfgcr2 = I915_READ(regs[id].cfgcr2);
1026 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
/*
 * DPLL0 variant of get_hw_state: only CTRL1 is read back, and the
 * PLL being disabled is a WARN condition since DPLL0 drives CDCLK.
 */
1031 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
1032 struct intel_shared_dpll *pll,
1033 struct intel_dpll_hw_state *hw_state)
1035 const struct skl_dpll_regs *regs = skl_dpll_regs;
1036 const enum intel_dpll_id id = pll->info->id;
1037 intel_wakeref_t wakeref;
1041 wakeref = intel_display_power_get_if_enabled(dev_priv,
1048 /* DPLL0 is always enabled since it drives CDCLK */
1049 val = I915_READ(regs[id].ctl);
1050 if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
1053 val = I915_READ(DPLL_CTRL1);
1054 hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
1059 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
/* Running best-candidate state for the SKL WRPLL divider search. */
1064 struct skl_wrpll_context {
1065 u64 min_deviation; /* current minimal deviation */
1066 u64 central_freq; /* chosen central freq */
1067 u64 dco_freq; /* chosen dco freq */
1068 unsigned int p; /* chosen divider */
/* Reset the search context; U64_MAX means "no candidate found yet". */
1071 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1073 memset(ctx, 0, sizeof(*ctx));
1075 ctx->min_deviation = U64_MAX;
1078 /* DCO freq must be within +1%/-6% of the DCO central freq */
1079 #define SKL_DCO_MAX_PDEVIATION 100
1080 #define SKL_DCO_MAX_NDEVIATION 600
/*
 * Evaluate one candidate divider: compute the DCO frequency's
 * deviation from the central frequency (in 1/100 % units) and adopt
 * it as the new best if within the asymmetric +1%/-6% window and
 * better than the current minimum.
 */
1082 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1085 unsigned int divider)
1089 deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1092 /* positive deviation */
1093 if (dco_freq >= central_freq) {
1094 if (deviation < SKL_DCO_MAX_PDEVIATION &&
1095 deviation < ctx->min_deviation) {
1096 ctx->min_deviation = deviation;
1097 ctx->central_freq = central_freq;
1098 ctx->dco_freq = dco_freq;
1101 /* negative deviation */
1102 } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1103 deviation < ctx->min_deviation) {
1104 ctx->min_deviation = deviation;
1105 ctx->central_freq = central_freq;
1106 ctx->dco_freq = dco_freq;
/*
 * Decompose the overall divider @p into the hardware's three-stage
 * multipliers p0 * p1 * p2, handling even dividers first and then
 * the small set of supported odd values.
 */
1111 static void skl_wrpll_get_multipliers(unsigned int p,
1112 unsigned int *p0 /* out */,
1113 unsigned int *p1 /* out */,
1114 unsigned int *p2 /* out */)
1118 unsigned int half = p / 2;
1120 if (half == 1 || half == 2 || half == 3 || half == 5) {
1124 } else if (half % 2 == 0) {
1128 } else if (half % 3 == 0) {
1132 } else if (half % 7 == 0) {
1137 } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
1141 } else if (p == 5 || p == 7) {
1145 } else if (p == 15) {
1149 } else if (p == 21) {
1153 } else if (p == 35) {
1160 struct skl_wrpll_params {
/*
 * Translate the chosen central frequency and p0/p1/p2 multipliers
 * into the register-level wrpll_params encoding, including the
 * fixed-point DCO integer/fraction split.
 */
1170 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1173 u32 p0, u32 p1, u32 p2)
1177 switch (central_freq) {
1179 params->central_freq = 0;
1182 params->central_freq = 1;
1185 params->central_freq = 3;
1202 WARN(1, "Incorrect PDiv\n");
1219 WARN(1, "Incorrect KDiv\n");
1222 params->qdiv_ratio = p1;
/* Q divider is bypassed (mode 0) when the ratio is 1. */
1223 params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1225 dco_freq = p0 * p1 * p2 * afe_clock;
1228 * Intermediate values are in Hz.
1229 * Divide by MHz to match bspec
1231 params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
1232 params->dco_fraction =
1233 div_u64((div_u64(dco_freq, 24) -
1234 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
/*
 * Search even (preferred) then odd post dividers across the three
 * DCO central frequencies for the best WRPLL configuration, then
 * populate @wrpll_params from the winner.
 */
1238 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1239 struct skl_wrpll_params *wrpll_params)
1241 u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1242 u64 dco_central_freq[3] = { 8400000000ULL,
1245 static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
1246 24, 28, 30, 32, 36, 40, 42, 44,
1247 48, 52, 54, 56, 60, 64, 66, 68,
1248 70, 72, 76, 78, 80, 84, 88, 90,
1250 static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1251 static const struct {
1255 { even_dividers, ARRAY_SIZE(even_dividers) },
1256 { odd_dividers, ARRAY_SIZE(odd_dividers) },
1258 struct skl_wrpll_context ctx;
1259 unsigned int dco, d, i;
1260 unsigned int p0, p1, p2;
1262 skl_wrpll_context_init(&ctx);
1264 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1265 for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1266 for (i = 0; i < dividers[d].n_dividers; i++) {
1267 unsigned int p = dividers[d].list[i];
1268 u64 dco_freq = p * afe_clock;
1270 skl_wrpll_try_divider(&ctx,
1271 dco_central_freq[dco],
1275 * Skip the remaining dividers if we're sure to
1276 * have found the definitive divider, we can't
1277 * improve a 0 deviation.
1279 if (ctx.min_deviation == 0)
1280 goto skip_remaining_dividers;
1284 skip_remaining_dividers:
1286 * If a solution is found with an even divider, prefer
1289 if (d == 0 && ctx.p)
1294 DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
1299 * gcc incorrectly analyses that these can be used without being
1300 * initialized. To be fair, it's hard to guess.
1303 skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1304 skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
/*
 * Compute and stage ctrl1/cfgcr1/cfgcr2 for an HDMI port clock.
 * Returns false when no valid WRPLL configuration exists.
 */
1310 static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
1312 u32 ctrl1, cfgcr1, cfgcr2;
1313 struct skl_wrpll_params wrpll_params = { 0, };
1316 * See comment in intel_dpll_hw_state to understand why we always use 0
1317 * as the DPLL id in this function.
1319 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1321 ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
/* port_clock is in kHz; the calculator wants Hz. */
1323 if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
1327 cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1328 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1329 wrpll_params.dco_integer;
1331 cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1332 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1333 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1334 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1335 wrpll_params.central_freq;
1337 memset(&crtc_state->dpll_hw_state, 0,
1338 sizeof(crtc_state->dpll_hw_state));
1340 crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1341 crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1342 crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
/*
 * Stage ctrl1 for a DP link: map port_clock/2 to the matching
 * DPLL_CTRL1 link-rate encoding.
 */
1347 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1352 * See comment in intel_dpll_hw_state to understand why we always use 0
1353 * as the DPLL id in this function.
1355 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1356 switch (crtc_state->port_clock / 2) {
1358 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1361 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1364 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1368 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1371 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1374 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1378 memset(&crtc_state->dpll_hw_state, 0,
1379 sizeof(crtc_state->dpll_hw_state));
1381 crtc_state->dpll_hw_state.ctrl1 = ctrl1;
/*
 * SKL PLL selection: compute hw state for HDMI or DP, then search
 * the pool (eDP may use a different id range than other outputs)
 * and reference the chosen PLL in the atomic state.
 */
1386 static struct intel_shared_dpll *
1387 skl_get_dpll(struct intel_crtc_state *crtc_state,
1388 struct intel_encoder *encoder)
1390 struct intel_shared_dpll *pll;
1393 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
1394 bret = skl_ddi_hdmi_pll_dividers(crtc_state);
1396 DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
1399 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
1400 bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
1402 DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
1409 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
1410 pll = intel_find_shared_dpll(crtc_state,
1414 pll = intel_find_shared_dpll(crtc_state,
1420 intel_reference_shared_dpll(pll, crtc_state);
/* Debug-log the SKL ctrl1/cfgcr1/cfgcr2 hw state fields. */
1425 static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
1426 struct intel_dpll_hw_state *hw_state)
1428 DRM_DEBUG_KMS("dpll_hw_state: "
1429 "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
/* SKL DPLL1-3 vtable. */
1435 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1436 .enable = skl_ddi_pll_enable,
1437 .disable = skl_ddi_pll_disable,
1438 .get_hw_state = skl_ddi_pll_get_hw_state,
/* SKL DPLL0 vtable (DPLL0 drives CDCLK, hence separate hooks). */
1441 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1442 .enable = skl_ddi_dpll0_enable,
1443 .disable = skl_ddi_dpll0_disable,
1444 .get_hw_state = skl_ddi_dpll0_get_hw_state,
/*
 * Program and enable a BXT/GLK port PLL from pll->state.hw_state, then
 * wait for lock and configure lane staggering.
 * NOTE(review): extraction dropped lines (declarations of temp/phy,
 * closing braces) — numbering gaps in the original show missing content.
 */
1447 static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1448 struct intel_shared_dpll *pll)
1451 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1453 enum dpio_channel ch;
1455 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1457 /* Non-SSC reference */
1458 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1459 temp |= PORT_PLL_REF_SEL;
1460 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
/* GLK requires the PLL power well to be enabled before programming. */
1462 if (IS_GEMINILAKE(dev_priv)) {
1463 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1464 temp |= PORT_PLL_POWER_ENABLE;
1465 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1467 if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1468 PORT_PLL_POWER_STATE), 200))
1469 DRM_ERROR("Power state not set for PLL:%d\n", port);
1472 /* Disable 10 bit clock */
1473 temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1474 temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1475 I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Write P1/P2 dividers */
1478 temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1479 temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
1480 temp |= pll->state.hw_state.ebb0;
1481 I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
1483 /* Write M2 integer */
1484 temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1485 temp &= ~PORT_PLL_M2_MASK;
1486 temp |= pll->state.hw_state.pll0;
1487 I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
/* Write N divider */
1490 temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1491 temp &= ~PORT_PLL_N_MASK;
1492 temp |= pll->state.hw_state.pll1;
1493 I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
1495 /* Write M2 fraction */
1496 temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1497 temp &= ~PORT_PLL_M2_FRAC_MASK;
1498 temp |= pll->state.hw_state.pll2;
1499 I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
1501 /* Write M2 fraction enable */
1502 temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1503 temp &= ~PORT_PLL_M2_FRAC_ENABLE;
1504 temp |= pll->state.hw_state.pll3;
1505 I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
/* Write loop filter coefficients and gain control */
1508 temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1509 temp &= ~PORT_PLL_PROP_COEFF_MASK;
1510 temp &= ~PORT_PLL_INT_COEFF_MASK;
1511 temp &= ~PORT_PLL_GAIN_CTL_MASK;
1512 temp |= pll->state.hw_state.pll6;
1513 I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
1515 /* Write calibration val */
1516 temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1517 temp &= ~PORT_PLL_TARGET_CNT_MASK;
1518 temp |= pll->state.hw_state.pll8;
1519 I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
/* Lock threshold */
1521 temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1522 temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
1523 temp |= pll->state.hw_state.pll9;
1524 I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
/* DCO amplitude override */
1526 temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1527 temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
1528 temp &= ~PORT_PLL_DCO_AMP_MASK;
1529 temp |= pll->state.hw_state.pll10;
1530 I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
1532 /* Recalibrate with new settings */
1533 temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1534 temp |= PORT_PLL_RECALIBRATE;
1535 I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1536 temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1537 temp |= pll->state.hw_state.ebb4;
1538 I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable the PLL and wait for lock (timeout logged, not fatal). */
1541 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1542 temp |= PORT_PLL_ENABLE;
1543 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1544 POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1546 if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
1548 DRM_ERROR("PLL %d not locked\n", port);
/* GLK-only DCC delay workaround on the TX lanes. */
1550 if (IS_GEMINILAKE(dev_priv)) {
1551 temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
1552 temp |= DCC_DELAY_RANGE_2;
1553 I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
/*
 * While we write to the group register to program all lanes at once we
 * can read only lane registers and we pick lanes 0/1 for that.
 */
1560 temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1561 temp &= ~LANE_STAGGER_MASK;
1562 temp &= ~LANESTAGGER_STRAP_OVRD;
1563 temp |= pll->state.hw_state.pcsdw12;
1564 I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
/*
 * Disable a BXT/GLK port PLL; on GLK additionally power down the PLL
 * and wait for the power state to clear (timeout only logged).
 */
1567 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
1568 struct intel_shared_dpll *pll)
1570 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1573 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1574 temp &= ~PORT_PLL_ENABLE;
1575 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1576 POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1578 if (IS_GEMINILAKE(dev_priv)) {
1579 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1580 temp &= ~PORT_PLL_POWER_ENABLE;
1581 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1583 if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1584 PORT_PLL_POWER_STATE), 200))
1585 DRM_ERROR("Power state not reset for PLL:%d\n", port);
/*
 * Read back the current hardware state of a BXT/GLK port PLL into
 * *hw_state, masking each register down to the bits this driver owns.
 * Returns false (via elided paths) when the PLL power domain is off or
 * the PLL is disabled; takes/releases a PLLS power-domain wakeref.
 */
1589 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1590 struct intel_shared_dpll *pll,
1591 struct intel_dpll_hw_state *hw_state)
1593 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1594 intel_wakeref_t wakeref;
1596 enum dpio_channel ch;
1600 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1602 wakeref = intel_display_power_get_if_enabled(dev_priv,
1609 val = I915_READ(BXT_PORT_PLL_ENABLE(port));
1610 if (!(val & PORT_PLL_ENABLE))
/* Mask each readout to the fields programmed by bxt_ddi_pll_enable(). */
1613 hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1614 hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
1616 hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1617 hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
1619 hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1620 hw_state->pll0 &= PORT_PLL_M2_MASK;
1622 hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1623 hw_state->pll1 &= PORT_PLL_N_MASK;
1625 hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1626 hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
1628 hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1629 hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
1631 hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1632 hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
1633 PORT_PLL_INT_COEFF_MASK |
1634 PORT_PLL_GAIN_CTL_MASK;
1636 hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1637 hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
1639 hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1640 hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
1642 hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1643 hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
1644 PORT_PLL_DCO_AMP_MASK;
/*
 * While we write to the group register to program all lanes at once we
 * can read only lane registers. We configure all lanes the same way, so
 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
 */
1651 hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1652 if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
1653 DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
1655 I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
1656 hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
1661 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
1666 /* bxt clock parameters */
/*
 * Divider set for a BXT port PLL; field list elided in this extraction.
 * Fields appear to be: clock, p1, p2, m2_int, m2_frac, m2_frac_en, n
 * (plus vco) — TODO confirm against the full struct definition.
 */
1667 struct bxt_clk_div {
1679 /* pre-calculated values for DP linkrates */
1680 static const struct bxt_clk_div bxt_dp_clk_val[] = {
1681 {162000, 4, 2, 32, 1677722, 1, 1},
1682 {270000, 4, 1, 27, 0, 0, 1},
1683 {540000, 2, 1, 27, 0, 0, 1},
1684 {216000, 3, 2, 32, 1677722, 1, 1},
1685 {243000, 4, 1, 24, 1258291, 1, 1},
1686 {324000, 4, 1, 32, 1677722, 1, 1},
1687 {432000, 3, 1, 32, 1677722, 1, 1}
/*
 * Compute PLL dividers for an HDMI link by running the generic BXT
 * divider search, then unpack the result into *clk_div. The 22-bit
 * split of m2 mirrors the hardware's integer/fraction register layout.
 */
1691 bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
1692 struct bxt_clk_div *clk_div)
1694 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1695 struct dpll best_clock;
1697 /* Calculate HDMI div */
/*
 * FIXME: tie the following calculation into
 * i9xx_crtc_compute_clock
 */
1702 if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
1703 DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
1704 crtc_state->port_clock,
1705 pipe_name(crtc->pipe));
1709 clk_div->p1 = best_clock.p1;
1710 clk_div->p2 = best_clock.p2;
1711 WARN_ON(best_clock.m1 != 2);
1712 clk_div->n = best_clock.n;
/* m2 is a fixed-point value: top bits integer, low 22 bits fraction. */
1713 clk_div->m2_int = best_clock.m2 >> 22;
1714 clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
1715 clk_div->m2_frac_en = clk_div->m2_frac != 0;
1717 clk_div->vco = best_clock.vco;
/*
 * Look up pre-calculated DP dividers for crtc_state->port_clock in
 * bxt_dp_clk_val, falling back to entry 0 when no exact match exists,
 * and derive the resulting VCO frequency.
 */
1722 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
1723 struct bxt_clk_div *clk_div)
1725 int clock = crtc_state->port_clock;
1728 *clk_div = bxt_dp_clk_val[0];
1729 for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
1730 if (bxt_dp_clk_val[i].clock == clock) {
1731 *clk_div = bxt_dp_clk_val[i];
/* vco = link clock * 5 (10/2) * p1 * p2 */
1736 clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
/*
 * Translate the divider set in *clk_div into the BXT port-PLL register
 * values stored in crtc_state->dpll_hw_state. Loop-filter coefficients
 * and the target count are chosen from the VCO band; lane staggering
 * from the link clock (assignments elided in this extraction).
 */
1739 static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
1740 const struct bxt_clk_div *clk_div)
1742 struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
1743 int clock = crtc_state->port_clock;
1744 int vco = clk_div->vco;
1745 u32 prop_coef, int_coef, gain_ctl, targ_cnt;
1748 memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));
/* Pick loop-filter tuning per VCO frequency band. */
1750 if (vco >= 6200000 && vco <= 6700000) {
1755 } else if ((vco > 5400000 && vco < 6200000) ||
1756 (vco >= 4800000 && vco < 5400000)) {
1761 } else if (vco == 5400000) {
1767 DRM_ERROR("Invalid VCO\n");
/* Lane stagger selection by link clock (values elided). */
1773 else if (clock > 135000)
1775 else if (clock > 67000)
1777 else if (clock > 33000)
1782 dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
1783 dpll_hw_state->pll0 = clk_div->m2_int;
1784 dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
1785 dpll_hw_state->pll2 = clk_div->m2_frac;
1787 if (clk_div->m2_frac_en)
1788 dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
1790 dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
1791 dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
1793 dpll_hw_state->pll8 = targ_cnt;
1795 dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
1797 dpll_hw_state->pll10 =
1798 PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
1799 | PORT_PLL_DCO_AMP_OVR_EN_H;
1801 dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
1803 dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
/* DP path: compute dividers, then fill crtc_state->dpll_hw_state. */
1809 bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1811 struct bxt_clk_div clk_div = {};
1813 bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
1815 return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
/* HDMI path: compute dividers, then fill crtc_state->dpll_hw_state. */
1819 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1821 struct bxt_clk_div clk_div = {};
1823 bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
1825 return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
/*
 * .get_dpll hook for BXT: compute the HW state for the output type,
 * then pick the PLL by the fixed 1:1 port->PLL mapping and take a
 * reference on it in the atomic state.
 */
1828 static struct intel_shared_dpll *
1829 bxt_get_dpll(struct intel_crtc_state *crtc_state,
1830 struct intel_encoder *encoder)
1832 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1833 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1834 struct intel_shared_dpll *pll;
1835 enum intel_dpll_id id;
1837 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
1838 !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
1841 if (intel_crtc_has_dp_encoder(crtc_state) &&
1842 !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
1845 /* 1:1 mapping between ports and PLLs */
1846 id = (enum intel_dpll_id) encoder->port;
1847 pll = intel_get_shared_dpll_by_id(dev_priv, id);
1849 DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
1850 crtc->base.base.id, crtc->base.name, pll->info->name);
1852 intel_reference_shared_dpll(pll, crtc_state);
/* Dump all BXT port-PLL register fields to the KMS debug log. */
1857 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
1858 struct intel_dpll_hw_state *hw_state)
1860 DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
1861 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
1862 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
/* Enable/disable/readout ops shared by all BXT/GLK port PLLs. */
1876 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
1877 .enable = bxt_ddi_pll_enable,
1878 .disable = bxt_ddi_pll_disable,
1879 .get_hw_state = bxt_ddi_pll_get_hw_state,
/*
 * Pre-gen9 DDI sanity checks: verify the BIOS left LCPLL enabled and
 * selected as the CDCLK source; only log errors, never reprogram.
 */
1882 static void intel_ddi_pll_init(struct drm_device *dev)
1884 struct drm_i915_private *dev_priv = to_i915(dev);
1886 if (INTEL_GEN(dev_priv) < 9) {
1887 u32 val = I915_READ(LCPLL_CTL);
/*
 * The LCPLL register should be turned on by the BIOS. For now
 * let's just check its state and print errors in case
 * something is wrong. Don't even try to turn it on.
 */
1895 if (val & LCPLL_CD_SOURCE_FCLK)
1896 DRM_ERROR("CDCLK source is not LCPLL\n");
1898 if (val & LCPLL_PLL_DISABLE)
1899 DRM_ERROR("LCPLL is disabled\n");
/*
 * Per-platform PLL manager: the table of available PLLs plus the hooks
 * to select a PLL for a CRTC/encoder and to dump a PLL's HW state.
 */
1903 struct intel_dpll_mgr {
1904 const struct dpll_info *dpll_info;
1906 struct intel_shared_dpll *(*get_dpll)(struct intel_crtc_state *crtc_state,
1907 struct intel_encoder *encoder);
1909 void (*dump_hw_state)(struct drm_i915_private *dev_priv,
1910 struct intel_dpll_hw_state *hw_state);
/* IBX/CPT: two PCH DPLLs, shared between pipes when configs match. */
1913 static const struct dpll_info pch_plls[] = {
1914 { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
1915 { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
1919 static const struct intel_dpll_mgr pch_pll_mgr = {
1920 .dpll_info = pch_plls,
1921 .get_dpll = ibx_get_dpll,
1922 .dump_hw_state = ibx_dump_hw_state,
/*
 * HSW/BDW: two WRPLLs and SPLL are allocatable; the three fixed-rate
 * LCPLL link clocks are always-on references.
 */
1925 static const struct dpll_info hsw_plls[] = {
1926 { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
1927 { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
1928 { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
1929 { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
1930 { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
1931 { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
1935 static const struct intel_dpll_mgr hsw_pll_mgr = {
1936 .dpll_info = hsw_plls,
1937 .get_dpll = hsw_get_dpll,
1938 .dump_hw_state = hsw_dump_hw_state,
/* SKL: DPLL0 also drives CDCLK, hence always-on; DPLL1-3 allocatable. */
1941 static const struct dpll_info skl_plls[] = {
1942 { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
1943 { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
1944 { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
1945 { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
1949 static const struct intel_dpll_mgr skl_pll_mgr = {
1950 .dpll_info = skl_plls,
1951 .get_dpll = skl_get_dpll,
1952 .dump_hw_state = skl_dump_hw_state,
/* BXT: one port PLL per DDI port, reusing the SKL DPLL id space. */
1955 static const struct dpll_info bxt_plls[] = {
1956 { "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
1957 { "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
1958 { "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
1962 static const struct intel_dpll_mgr bxt_pll_mgr = {
1963 .dpll_info = bxt_plls,
1964 .get_dpll = bxt_get_dpll,
1965 .dump_hw_state = bxt_dump_hw_state,
/*
 * Enable a CNL DPLL following the bspec sequence: power up, program
 * CFGCR0 (and CFGCR1 for HDMI), enable, then wait for lock. DVFS steps
 * are handled by the cdclk code, and DDI clock mapping happens later in
 * intel_ddi_clk_select().
 */
1968 static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
1969 struct intel_shared_dpll *pll)
1971 const enum intel_dpll_id id = pll->info->id;
1974 /* 1. Enable DPLL power in DPLL_ENABLE. */
1975 val = I915_READ(CNL_DPLL_ENABLE(id));
1976 val |= PLL_POWER_ENABLE;
1977 I915_WRITE(CNL_DPLL_ENABLE(id), val);
1979 /* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
1980 if (intel_wait_for_register(dev_priv,
1981 CNL_DPLL_ENABLE(id),
1985 DRM_ERROR("PLL %d Power not enabled\n", id);
/*
 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
 * select DP mode, and set DP link rate.
 */
1991 val = pll->state.hw_state.cfgcr0;
1992 I915_WRITE(CNL_DPLL_CFGCR0(id), val);
1994 /* 4. Read back to ensure writes completed */
1995 POSTING_READ(CNL_DPLL_CFGCR0(id));
1997 /* 3. Configure DPLL_CFGCR0 */
1998 /* Avoid touch CFGCR1 if HDMI mode is not enabled */
1999 if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
2000 val = pll->state.hw_state.cfgcr1;
2001 I915_WRITE(CNL_DPLL_CFGCR1(id), val);
2002 /* 4. Read back to ensure writes completed */
2003 POSTING_READ(CNL_DPLL_CFGCR1(id));
/*
 * 5. If the frequency will result in a change to the voltage
 * requirement, follow the Display Voltage Frequency Switching
 * Sequence Before Frequency Change
 *
 * Note: DVFS is actually handled via the cdclk code paths,
 * hence we do nothing here.
 */
2015 /* 6. Enable DPLL in DPLL_ENABLE. */
2016 val = I915_READ(CNL_DPLL_ENABLE(id));
2018 I915_WRITE(CNL_DPLL_ENABLE(id), val);
2020 /* 7. Wait for PLL lock status in DPLL_ENABLE. */
2021 if (intel_wait_for_register(dev_priv,
2022 CNL_DPLL_ENABLE(id),
2026 DRM_ERROR("PLL %d not locked\n", id);
/*
 * 8. If the frequency will result in a change to the voltage
 * requirement, follow the Display Voltage Frequency Switching
 * Sequence After Frequency Change
 *
 * Note: DVFS is actually handled via the cdclk code paths,
 * hence we do nothing here.
 */
/*
 * 9. turn on the clock for the DDI and map the DPLL to the DDI
 * Done at intel_ddi_clk_select
 */
/*
 * Disable a CNL DPLL following the bspec sequence: disable, wait for
 * unlock, then power down and wait for power state to clear. The DDI
 * clock gating and DVFS steps are handled elsewhere, as noted inline.
 */
2043 static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
2044 struct intel_shared_dpll *pll)
2046 const enum intel_dpll_id id = pll->info->id;
/*
 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
 * Done at intel_ddi_post_disable
 */
/*
 * 2. If the frequency will result in a change to the voltage
 * requirement, follow the Display Voltage Frequency Switching
 * Sequence Before Frequency Change
 *
 * Note: DVFS is actually handled via the cdclk code paths,
 * hence we do nothing here.
 */
2063 /* 3. Disable DPLL through DPLL_ENABLE. */
2064 val = I915_READ(CNL_DPLL_ENABLE(id));
2066 I915_WRITE(CNL_DPLL_ENABLE(id), val);
2068 /* 4. Wait for PLL not locked status in DPLL_ENABLE. */
2069 if (intel_wait_for_register(dev_priv,
2070 CNL_DPLL_ENABLE(id),
2074 DRM_ERROR("PLL %d locked\n", id);
/*
 * 5. If the frequency will result in a change to the voltage
 * requirement, follow the Display Voltage Frequency Switching
 * Sequence After Frequency Change
 *
 * Note: DVFS is actually handled via the cdclk code paths,
 * hence we do nothing here.
 */
2085 /* 6. Disable DPLL power in DPLL_ENABLE. */
2086 val = I915_READ(CNL_DPLL_ENABLE(id));
2087 val &= ~PLL_POWER_ENABLE;
2088 I915_WRITE(CNL_DPLL_ENABLE(id), val);
2090 /* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
2091 if (intel_wait_for_register(dev_priv,
2092 CNL_DPLL_ENABLE(id),
2096 DRM_ERROR("PLL %d Power not disabled\n", id);
/*
 * Read back a CNL DPLL's CFGCR0 (and CFGCR1 only when HDMI mode is
 * set, to avoid stale values). Takes a PLLS power-domain wakeref and
 * bails out (elided paths) when the domain or PLL is off.
 */
2099 static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2100 struct intel_shared_dpll *pll,
2101 struct intel_dpll_hw_state *hw_state)
2103 const enum intel_dpll_id id = pll->info->id;
2104 intel_wakeref_t wakeref;
2108 wakeref = intel_display_power_get_if_enabled(dev_priv,
2115 val = I915_READ(CNL_DPLL_ENABLE(id));
2116 if (!(val & PLL_ENABLE))
2119 val = I915_READ(CNL_DPLL_CFGCR0(id));
2120 hw_state->cfgcr0 = val;
2122 /* avoid reading back stale values if HDMI mode is not enabled */
2123 if (val & DPLL_CFGCR0_HDMI_MODE) {
2124 hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(id));
2129 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
/*
 * Factor a total WRPLL divider into the pdiv/qdiv/kdiv triple the
 * hardware supports. Even dividers take the first branch chain; the
 * odd dividers (3, 5, 7, 9, 15, 21) take the tail branch.
 * NOTE(review): the original nesting was flattened by extraction — the
 * %4/%6/%5/%14 tests are sub-cases of the even branch in the full file.
 */
2134 static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
2135 int *qdiv, int *kdiv)
2138 if (bestdiv % 2 == 0) {
2143 } else if (bestdiv % 4 == 0) {
2145 *qdiv = bestdiv / 4;
2147 } else if (bestdiv % 6 == 0) {
2149 *qdiv = bestdiv / 6;
2151 } else if (bestdiv % 5 == 0) {
2153 *qdiv = bestdiv / 10;
2155 } else if (bestdiv % 14 == 0) {
2157 *qdiv = bestdiv / 14;
2161 if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
2165 } else { /* 9, 15, 21 */
2166 *pdiv = bestdiv / 3;
/*
 * Encode pdiv/qdiv/kdiv and the DCO ratio into skl_wrpll_params. The
 * DCO ratio is fixed-point: integer part plus a 15-bit fraction of
 * dco_freq / ref_freq. Invalid divider encodings trigger WARNs.
 */
2173 static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
2174 u32 dco_freq, u32 ref_freq,
2175 int pdiv, int qdiv, int kdiv)
2190 WARN(1, "Incorrect KDiv\n");
2207 WARN(1, "Incorrect PDiv\n");
2210 WARN_ON(kdiv != 2 && qdiv != 1);
2212 params->qdiv_ratio = qdiv;
2213 params->qdiv_mode = (qdiv == 1) ? 0 : 1;
/* dco_integer:dco_fraction is a 15-bit fixed-point ratio. */
2215 dco = div_u64((u64)dco_freq << 15, ref_freq);
2217 params->dco_integer = dco >> 15;
2218 params->dco_fraction = dco & 0x7fff;
/*
 * Return the reference clock (kHz) to use for HDMI WRPLL calculations.
 * On ICL+ a 38.4 MHz reference is treated as 19.2 MHz because the DPLL
 * divides it by two internally (return statements elided here).
 */
2221 int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
2223 int ref_clock = dev_priv->cdclk.hw.ref;
/*
 * For ICL+, the spec states: if reference frequency is 38.4,
 * use 19.2 because the DPLL automatically divides that by 2.
 */
2229 if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
/*
 * Find WRPLL parameters for an HDMI clock: scan the allowed divider
 * list for the DCO frequency closest to mid-band (best "centrality"),
 * then split the winning divider into pdiv/qdiv/kdiv and populate
 * *wrpll_params. AFE clock is 5x the port clock (DDI runs at 5 symbols
 * per clock).
 */
2236 cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2237 struct skl_wrpll_params *wrpll_params)
2239 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2240 u32 afe_clock = crtc_state->port_clock * 5;
2242 u32 dco_min = 7998000;
2243 u32 dco_max = 10000000;
2244 u32 dco_mid = (dco_min + dco_max) / 2;
/* Even dividers first, then the permitted odd ones. */
2245 static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
2246 18, 20, 24, 28, 30, 32, 36, 40,
2247 42, 44, 48, 50, 52, 54, 56, 60,
2248 64, 66, 68, 70, 72, 76, 78, 80,
2249 84, 88, 90, 92, 96, 98, 100, 102,
2250 3, 5, 7, 9, 15, 21 };
2251 u32 dco, best_dco = 0, dco_centrality = 0;
2252 u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2253 int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2255 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2256 dco = afe_clock * dividers[d];
2258 if ((dco <= dco_max) && (dco >= dco_min)) {
2259 dco_centrality = abs(dco - dco_mid);
2261 if (dco_centrality < best_dco_centrality) {
2262 best_dco_centrality = dco_centrality;
2263 best_div = dividers[d];
2272 cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2274 ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
2276 cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
/*
 * Compute and store CFGCR0/CFGCR1 for an HDMI link in
 * crtc_state->dpll_hw_state. Returns false (elided) when no valid
 * WRPLL parameters exist for the requested clock.
 */
2282 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
2285 struct skl_wrpll_params wrpll_params = { 0, };
2287 cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2289 if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
2292 cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2293 wrpll_params.dco_integer;
2295 cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2296 DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2297 DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2298 DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2299 DPLL_CFGCR1_CENTRAL_FREQ;
2301 memset(&crtc_state->dpll_hw_state, 0,
2302 sizeof(crtc_state->dpll_hw_state));
2304 crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2305 crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
/*
 * Fill CFGCR0 for a DP link: enable SSC and select the fixed link-rate
 * encoding from port_clock/2 (symbol clock). Case labels are elided in
 * this extraction; HBR3-class rates carry an I/O-voltage caveat.
 */
2310 cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2314 cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
2316 switch (crtc_state->port_clock / 2) {
2318 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
2321 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
2324 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
2328 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
2331 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
2334 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
2337 /* Some SKUs may require elevated I/O voltage to support this */
2338 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
2341 /* Some SKUs may require elevated I/O voltage to support this */
2342 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
2346 memset(&crtc_state->dpll_hw_state, 0,
2347 sizeof(crtc_state->dpll_hw_state));
2349 crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
/*
 * .get_dpll hook for CNL: compute the HW state for HDMI or DP, then
 * search the shared-PLL pool for a compatible/free PLL and reference
 * it. Returns NULL (elided paths) on failure.
 */
2354 static struct intel_shared_dpll *
2355 cnl_get_dpll(struct intel_crtc_state *crtc_state,
2356 struct intel_encoder *encoder)
2358 struct intel_shared_dpll *pll;
2361 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
2362 bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
2364 DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
2367 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
2368 bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
2370 DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
2374 DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
2375 crtc_state->output_types);
2379 pll = intel_find_shared_dpll(crtc_state,
2383 DRM_DEBUG_KMS("No PLL selected\n");
2387 intel_reference_shared_dpll(pll, crtc_state);
/* Dump CNL DPLL CFGCR0/CFGCR1 to the KMS debug log. */
2392 static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
2393 struct intel_dpll_hw_state *hw_state)
2395 DRM_DEBUG_KMS("dpll_hw_state: "
2396 "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
/* Enable/disable/readout ops shared by all CNL DPLLs. */
2401 static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
2402 .enable = cnl_ddi_pll_enable,
2403 .disable = cnl_ddi_pll_disable,
2404 .get_hw_state = cnl_ddi_pll_get_hw_state,
/* CNL: three general-purpose DPLLs, reusing the SKL DPLL id space. */
2407 static const struct dpll_info cnl_plls[] = {
2408 { "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2409 { "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2410 { "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2414 static const struct intel_dpll_mgr cnl_pll_mgr = {
2415 .dpll_info = cnl_plls,
2416 .get_dpll = cnl_get_dpll,
2417 .dump_hw_state = cnl_dump_hw_state,
/*
 * ICL combo-PHY PLL lookup entry: a link clock (field elided here)
 * paired with its pre-computed WRPLL parameters.
 */
2420 struct icl_combo_pll_params {
2422 struct skl_wrpll_params wrpll;
2426 * These values are already adjusted: they're the bits we write to the
2427 * registers, not the logical values.
/* Pre-computed combo-PHY DP PLL parameters for a 24 MHz reference. */
2429 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2431 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */
2432 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2434 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */
2435 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2437 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */
2438 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2440 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */
2441 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2443 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */
2444 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2446 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */
2447 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2449 { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */
2450 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2452 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */
2453 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2457 /* Also used for 38.4 MHz values. */
/* Pre-computed combo-PHY DP PLL parameters for a 19.2 MHz reference. */
2458 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2460 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */
2461 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2463 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */
2464 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2466 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */
2467 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2469 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */
2470 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2472 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */
2473 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2475 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */
2476 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2478 { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */
2479 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2481 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */
2482 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
/* Fixed TBT PLL parameters for a 24 MHz reference. */
2485 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2486 .dco_integer = 0x151, .dco_fraction = 0x4000,
2487 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
/* Fixed TBT PLL parameters for a 19.2 (or 38.4) MHz reference. */
2490 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2491 .dco_integer = 0x1A5, .dco_fraction = 0x7000,
2492 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
/*
 * Pick pre-computed combo-PLL parameters for a DP link rate from the
 * table matching the reference clock. The two tables are index- and
 * size-compatible, so sizing the loop by the 24MHz table is safe.
 * Unknown rates hit MISSING_CASE (return elided).
 */
2495 static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2496 struct skl_wrpll_params *pll_params)
2498 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2499 const struct icl_combo_pll_params *params =
2500 dev_priv->cdclk.hw.ref == 24000 ?
2501 icl_dp_combo_pll_24MHz_values :
2502 icl_dp_combo_pll_19_2MHz_values;
2503 int clock = crtc_state->port_clock;
2506 for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
2507 if (clock == params[i].clock) {
2508 *pll_params = params[i].wrpll;
2513 MISSING_CASE(clock);
/* Select the fixed TBT PLL parameter set for the current reference. */
2517 static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
2518 struct skl_wrpll_params *pll_params)
2520 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2522 *pll_params = dev_priv->cdclk.hw.ref == 24000 ?
2523 icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;
/*
 * Compute CFGCR0/CFGCR1 for an ICL combo or TBT PLL: TC ports use the
 * fixed TBT tables, HDMI/DSI reuse the CNL WRPLL search, DP uses the
 * pre-computed combo tables. Result goes into crtc_state->dpll_hw_state.
 */
2527 static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
2528 struct intel_encoder *encoder)
2530 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2532 struct skl_wrpll_params pll_params = { 0 };
2535 if (intel_port_is_tc(dev_priv, encoder->port))
2536 ret = icl_calc_tbt_pll(crtc_state, &pll_params);
2537 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
2538 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
2539 ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
2541 ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
2546 cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params.dco_fraction) |
2547 pll_params.dco_integer;
2549 cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
2550 DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
2551 DPLL_CFGCR1_KDIV(pll_params.kdiv) |
2552 DPLL_CFGCR1_PDIV(pll_params.pdiv) |
2553 DPLL_CFGCR1_CENTRAL_FREQ_8400;
2555 memset(&crtc_state->dpll_hw_state, 0,
2556 sizeof(crtc_state->dpll_hw_state));
2558 crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2559 crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
/* Map an MG PLL id back to its Type-C port index (inverse of below). */
2565 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
2567 return id - DPLL_ID_ICL_MGPLL1;
/* Map a Type-C port index to its dedicated MG PLL id. */
2570 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
2572 return tc_port + DPLL_ID_ICL_MGPLL1;
/*
 * Search (div1 x div2) combinations for an MG PLL DCO frequency within
 * the valid band (narrowed to exactly 8.1 GHz for DP). On success,
 * record the DCO and the clktop2 register fields derived from the
 * dividers. Returns false (elided) when no combination fits.
 */
2575 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2576 u32 *target_dco_khz,
2577 struct intel_dpll_hw_state *state)
2579 u32 dco_min_freq, dco_max_freq;
2580 int div1_vals[] = {7, 5, 3, 2};
2584 dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2585 dco_max_freq = is_dp ? 8100000 : 10000000;
2587 for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2588 int div1 = div1_vals[i];
2590 for (div2 = 10; div2 > 0; div2--) {
/* DCO = link clock * 5 (symbols) * div1 * div2. */
2591 int dco = div1 * div2 * clock_khz * 5;
2592 int a_divratio, tlinedrv, inputsel;
2595 if (dco < dco_min_freq || dco > dco_max_freq)
2599 a_divratio = is_dp ? 10 : 5;
2605 inputsel = is_dp ? 0 : 1;
/* Encode div1 as the HSDIV ratio field. */
2612 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2615 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2618 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2621 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2625 *target_dco_khz = dco;
2627 state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2629 state->mg_clktop2_coreclkctl1 =
2630 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2632 state->mg_clktop2_hsclkctl =
2633 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2634 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2636 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
/*
 * The specification for this function uses real numbers, so the math had to be
 * adapted to integer-only calculation, that's why it looks so different.
 *
 * Computes the full MG (Type-C) PLL register state for crtc_state:
 * feedback dividers, iref settings, loop filter, fractional lock, SSC
 * and bias registers, plus the 38.4 MHz readout masks.
 */
2649 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state)
2651 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2652 struct intel_dpll_hw_state *pll_state = &crtc_state->dpll_hw_state;
2653 int refclk_khz = dev_priv->cdclk.hw.ref;
2654 int clock = crtc_state->port_clock;
2655 u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2656 u32 iref_ndiv, iref_trim, iref_pulse_w;
2657 u32 prop_coeff, int_coeff;
2658 u32 tdc_targetcnt, feedfwgain;
2659 u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2661 bool use_ssc = false;
2662 bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2664 memset(pll_state, 0, sizeof(*pll_state));
2666 if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2668 DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
/* Split the feedback divider into integer and 22-bit fraction. */
2673 m2div_int = dco_khz / (refclk_khz * m1div);
2674 if (m2div_int > 255) {
/* Retry with the alternate m1div (assignment elided). */
2676 m2div_int = dco_khz / (refclk_khz * m1div);
2677 if (m2div_int > 255) {
2678 DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
2683 m2div_rem = dco_khz % (refclk_khz * m1div);
2685 tmp = (u64)m2div_rem * (1 << 22);
2686 do_div(tmp, refclk_khz * m1div);
/* iref settings depend on the reference clock (cases elided). */
2689 switch (refclk_khz) {
2706 MISSING_CASE(refclk_khz);
/*
 * tdc_res = 0.000003
 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
 *
 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
 * was supposed to be a division, but we rearranged the operations of
 * the formula to avoid early divisions so we don't multiply the
 *
 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
 * we also rearrange to work with integers.
 *
 * The 0.5 transformed to 5 results in a multiplication by 10 and the
 * last division by 10.
 */
2725 tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
/*
 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
 * 32 bits. That's not a problem since we round the division down
 */
2732 feedfwgain = (use_ssc || m2div_rem > 0) ?
2733 m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
/* Loop-filter coefficients per DCO band (assignments elided). */
2735 if (dco_khz >= 9000000) {
/* SSC step size/length in fixed point (only used when use_ssc). */
2744 tmp = (u64)dco_khz * 47 * 32;
2745 do_div(tmp, refclk_khz * m1div * 10000);
2748 tmp = (u64)dco_khz * 1000;
2749 ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2756 pll_state->mg_pll_div0 = (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2757 MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2758 MG_PLL_DIV0_FBDIV_INT(m2div_int);
2760 pll_state->mg_pll_div1 = MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2761 MG_PLL_DIV1_DITHER_DIV_2 |
2762 MG_PLL_DIV1_NDIVRATIO(1) |
2763 MG_PLL_DIV1_FBPREDIV(m1div);
2765 pll_state->mg_pll_lf = MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2766 MG_PLL_LF_AFCCNTSEL_512 |
2767 MG_PLL_LF_GAINCTRL(1) |
2768 MG_PLL_LF_INT_COEFF(int_coeff) |
2769 MG_PLL_LF_PROP_COEFF(prop_coeff);
2771 pll_state->mg_pll_frac_lock = MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
2772 MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
2773 MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
2774 MG_PLL_FRAC_LOCK_DCODITHEREN |
2775 MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
2776 if (use_ssc || m2div_rem > 0)
2777 pll_state->mg_pll_frac_lock |= MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
2779 pll_state->mg_pll_ssc = (use_ssc ? MG_PLL_SSC_EN : 0) |
2780 MG_PLL_SSC_TYPE(2) |
2781 MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
2782 MG_PLL_SSC_STEPNUM(ssc_steplog) |
2784 MG_PLL_SSC_STEPSIZE(ssc_stepsize);
2786 pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART |
2787 MG_PLL_TDC_COLDST_IREFINT_EN |
2788 MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
2789 MG_PLL_TDC_TDCOVCCORR_EN |
2790 MG_PLL_TDC_TDCSEL(3);
2792 pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
2793 MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
2794 MG_PLL_BIAS_BIAS_BONUS(10) |
2795 MG_PLL_BIAS_BIASCAL_EN |
2796 MG_PLL_BIAS_CTRIM(12) |
2797 MG_PLL_BIAS_VREF_RDAC(4) |
2798 MG_PLL_BIAS_IREFTRIM(iref_trim);
/*
 * With a 38.4 MHz reference most bias fields aren't compared on
 * readout, so restrict the masks accordingly.
 */
2800 if (refclk_khz == 38400) {
2801 pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
2802 pll_state->mg_pll_bias_mask = 0;
2804 pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
2805 pll_state->mg_pll_bias_mask = -1U;
2808 pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask;
2809 pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
/*
 * icl_get_dpll - pick and reserve a DPLL for an ICL CRTC/encoder combination.
 *
 * Combo PHY ports choose between the two combo PLLs (DPLL0/DPLL1); Type-C
 * ports use either the shared TBT PLL or the port-specific MG PLL depending
 * on the digital port's TC mode. The chosen PLL is referenced in the atomic
 * state via intel_reference_shared_dpll().
 */
static struct intel_shared_dpll *
icl_get_dpll(struct intel_crtc_state *crtc_state,
	     struct intel_encoder *encoder)
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct intel_digital_port *intel_dig_port;
	struct intel_shared_dpll *pll;
	enum port port = encoder->port;
	enum intel_dpll_id min, max;
	/* Combo PHY ports may use either of the two combo PLLs. */
	if (intel_port_is_combophy(dev_priv, port)) {
		min = DPLL_ID_ICL_DPLL0;
		max = DPLL_ID_ICL_DPLL1;
		ret = icl_calc_dpll_state(crtc_state, encoder);
	} else if (intel_port_is_tc(dev_priv, port)) {
		/* MST encoders hang off a primary digital port. */
		if (encoder->type == INTEL_OUTPUT_DP_MST) {
			struct intel_dp_mst_encoder *mst_encoder;
			mst_encoder = enc_to_mst(&encoder->base);
			intel_dig_port = mst_encoder->primary;
			intel_dig_port = enc_to_dig_port(&encoder->base);
		/* TBT-alt mode uses the single shared TBT PLL. */
		if (intel_dig_port->tc_type == TC_PORT_TBT) {
			min = DPLL_ID_ICL_TBTPLL;
			ret = icl_calc_dpll_state(crtc_state, encoder);
			/* Otherwise the port gets its dedicated MG PLL. */
			enum tc_port tc_port;
			tc_port = intel_port_to_tc(dev_priv, port);
			min = icl_tc_port_to_pll_id(tc_port);
			ret = icl_calc_mg_pll_state(crtc_state);
	DRM_DEBUG_KMS("Could not calculate PLL state.\n");
	pll = intel_find_shared_dpll(crtc_state, min, max);
	DRM_DEBUG_KMS("No PLL selected\n");
	/* Stage the reference from this crtc to the pll in the atomic state. */
	intel_reference_shared_dpll(pll, crtc_state);
/*
 * Read out the current MG PLL register state for a Type-C port PLL.
 * Register fields with reserved bits are masked down to only the bits
 * the driver programs, so later state comparisons ignore reserved bits.
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	/* Skip readout entirely if the PLL power domain cannot be acquired. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
	val = I915_READ(MG_PLL_ENABLE(tc_port));
	if (!(val & PLL_ENABLE))
	/* Mask to the OD_2 mux field we program in icl_mg_pll_write(). */
	hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	hw_state->mg_clktop2_coreclkctl1 =
		I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	hw_state->mg_clktop2_hsclkctl =
		I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
	/* These registers are fully driver-owned; capture them verbatim. */
	hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
	hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
	/*
	 * With a 38.4 MHz reference clock only the COLDSTART bit of
	 * TDC_COLDST_BIAS is relevant and BIAS is ignored; otherwise all
	 * bits are compared. Mirrors the mask choice in icl_calc_mg_pll_state().
	 */
	if (dev_priv->cdclk.hw.ref == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
/*
 * Common readout helper for combo and TBT PLLs: check the given enable
 * register and, if the PLL is enabled, capture CFGCR0/CFGCR1.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	/* Skip readout if the PLL power domain cannot be acquired. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
	val = I915_READ(enable_reg);
	if (!(val & PLL_ENABLE))
	hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
	hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
2963 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
2964 struct intel_shared_dpll *pll,
2965 struct intel_dpll_hw_state *hw_state)
2967 return icl_pll_get_hw_state(dev_priv, pll, hw_state,
2968 CNL_DPLL_ENABLE(pll->info->id));
2971 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
2972 struct intel_shared_dpll *pll,
2973 struct intel_dpll_hw_state *hw_state)
2975 return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
/* Program the combo/TBT PLL configuration registers from the sw state. */
static void icl_dpll_write(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	const enum intel_dpll_id id = pll->info->id;
	I915_WRITE(ICL_DPLL_CFGCR0(id), hw_state->cfgcr0);
	I915_WRITE(ICL_DPLL_CFGCR1(id), hw_state->cfgcr1);
	/* Posting read flushes the config writes before the PLL is enabled. */
	POSTING_READ(ICL_DPLL_CFGCR1(id));
/* Program the MG PLL registers for a Type-C port from the sw state. */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	val = I915_READ(MG_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	I915_WRITE(MG_REFCLKIN_CTL(tc_port), val);
	val = I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	I915_WRITE(MG_CLKTOP2_CORECLKCTL1(tc_port), val);
	val = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	I915_WRITE(MG_CLKTOP2_HSCLKCTL(tc_port), val);
	/* These registers are fully driver-owned, so plain writes suffice. */
	I915_WRITE(MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	I915_WRITE(MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	I915_WRITE(MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	I915_WRITE(MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock);
	I915_WRITE(MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
	/* BIAS/TDC_COLDST_BIAS masks come from calc/readout (refclk dependent). */
	val = I915_READ(MG_PLL_BIAS(tc_port));
	val &= ~hw_state->mg_pll_bias_mask;
	val |= hw_state->mg_pll_bias;
	I915_WRITE(MG_PLL_BIAS(tc_port), val);
	val = I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
	val |= hw_state->mg_pll_tdc_coldst_bias;
	I915_WRITE(MG_PLL_TDC_COLDST_BIAS(tc_port), val);
	/* Flush all the writes before the caller enables the PLL. */
	POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
/* Power up a PLL block and wait for PLL_POWER_STATE to be asserted. */
static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 i915_reg_t enable_reg)
	val = I915_READ(enable_reg);
	val |= PLL_POWER_ENABLE;
	I915_WRITE(enable_reg, val);
	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE,
				    PLL_POWER_STATE, 1))
		DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
/* Enable the PLL via its enable register and wait for it to report lock. */
static void icl_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll,
			   i915_reg_t enable_reg)
	val = I915_READ(enable_reg);
	I915_WRITE(enable_reg, val);
	/* Timeout is actually 600us. */
	if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, PLL_LOCK,
		DRM_ERROR("PLL %d not locked\n", pll->info->id);
/* Enable sequence for a combo PHY PLL: power up, program config, enable. */
static void combo_pll_enable(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
	icl_pll_power_enable(dev_priv, pll, enable_reg);
	icl_dpll_write(dev_priv, pll);
	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * not need to change the voltage here.
	 */
	icl_pll_enable(dev_priv, pll, enable_reg);
	/* DVFS post sequence would be here. See the comment above. */
3094 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
3095 struct intel_shared_dpll *pll)
3097 icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
3099 icl_dpll_write(dev_priv, pll);
3102 * DVFS pre sequence would be here, but in our driver the cdclk code
3103 * paths should already be setting the appropriate voltage, hence we do
3107 icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
3109 /* DVFS post sequence would be here. See the comment above. */
/* Enable sequence for a Type-C MG PLL: power up, program config, enable. */
static void mg_pll_enable(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
	i915_reg_t enable_reg =
		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
	icl_pll_power_enable(dev_priv, pll, enable_reg);
	icl_mg_pll_write(dev_priv, pll);
	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * not need to change the voltage here.
	 */
	icl_pll_enable(dev_priv, pll, enable_reg);
	/* DVFS post sequence would be here. See the comment above. */
/* Disable sequence: drop PLL enable, wait for unlock, then power down. */
static void icl_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
	/* The first steps are done by intel_ddi_post_disable(). */
	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * not need to change the voltage here.
	 */
	val = I915_READ(enable_reg);
	I915_WRITE(enable_reg, val);
	/* Timeout is actually 1us. */
	if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, 0, 1))
		DRM_ERROR("PLL %d locked\n", pll->info->id);
	/* DVFS post sequence would be here. See the comment above. */
	val = I915_READ(enable_reg);
	val &= ~PLL_POWER_ENABLE;
	I915_WRITE(enable_reg, val);
	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE, 0,
		DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
3170 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3171 struct intel_shared_dpll *pll)
3173 icl_pll_disable(dev_priv, pll, CNL_DPLL_ENABLE(pll->info->id));
3176 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
3177 struct intel_shared_dpll *pll)
3179 icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
3182 static void mg_pll_disable(struct drm_i915_private *dev_priv,
3183 struct intel_shared_dpll *pll)
3185 i915_reg_t enable_reg =
3186 MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3188 icl_pll_disable(dev_priv, pll, enable_reg);
3191 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3192 struct intel_dpll_hw_state *hw_state)
3194 DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
3195 "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3196 "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3197 "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3198 "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3199 "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3200 hw_state->cfgcr0, hw_state->cfgcr1,
3201 hw_state->mg_refclkin_ctl,
3202 hw_state->mg_clktop2_coreclkctl1,
3203 hw_state->mg_clktop2_hsclkctl,
3204 hw_state->mg_pll_div0,
3205 hw_state->mg_pll_div1,
3206 hw_state->mg_pll_lf,
3207 hw_state->mg_pll_frac_lock,
3208 hw_state->mg_pll_ssc,
3209 hw_state->mg_pll_bias,
3210 hw_state->mg_pll_tdc_coldst_bias);
/* Enable/disable/readout hooks for the ICL combo PHY PLLs. */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
/* Enable/disable/readout hooks for the ICL Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
/* Enable/disable/readout hooks for the ICL Type-C MG PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
/*
 * ICL PLL table: two combo PLLs, the shared TBT PLL and four per-port MG
 * PLLs. Entry order matches the DPLL_ID_ICL_* ids (checked at init time).
 */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
/* Shared-DPLL manager glue for gen11+ platforms. */
static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.get_dpll = icl_get_dpll,
	.dump_hw_state = icl_dump_hw_state,
 * intel_shared_dpll_init - Initialize shared DPLLs
 * @dev: drm device
 *
 * Initialize shared DPLLs for @dev.
void intel_shared_dpll_init(struct drm_device *dev)
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_dpll_mgr *dpll_mgr = NULL;
	const struct dpll_info *dpll_info;
	/* Pick the DPLL manager matching the platform generation. */
	if (INTEL_GEN(dev_priv) >= 11)
		dpll_mgr = &icl_pll_mgr;
	else if (IS_CANNONLAKE(dev_priv))
		dpll_mgr = &cnl_pll_mgr;
	else if (IS_GEN9_BC(dev_priv))
		dpll_mgr = &skl_pll_mgr;
	else if (IS_GEN9_LP(dev_priv))
		dpll_mgr = &bxt_pll_mgr;
	else if (HAS_DDI(dev_priv))
		dpll_mgr = &hsw_pll_mgr;
	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		dpll_mgr = &pch_pll_mgr;
	/* Platforms without a manager have no shared DPLLs. */
	dev_priv->num_shared_dpll = 0;
	dpll_info = dpll_mgr->dpll_info;
	/* Each table index is expected to match the corresponding PLL id. */
	for (i = 0; dpll_info[i].name; i++) {
		WARN_ON(i != dpll_info[i].id);
		dev_priv->shared_dplls[i].info = &dpll_info[i];
	dev_priv->dpll_mgr = dpll_mgr;
	dev_priv->num_shared_dpll = i;
	mutex_init(&dev_priv->dpll_lock);
	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
	/* FIXME: Move this to a more suitable place */
	if (HAS_DDI(dev_priv))
		intel_ddi_pll_init(dev);
 * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination
 * @crtc_state: atomic state for the crtc
 * @encoder: encoder driving the crtc
3302 * Find an appropriate DPLL for the given CRTC and encoder combination. A
3303 * reference from the @crtc_state to the returned pll is registered in the
3304 * atomic state. That configuration is made effective by calling
3305 * intel_shared_dpll_swap_state(). The reference should be released by calling
3306 * intel_release_shared_dpll().
3309 * A shared DPLL to be used by @crtc_state and @encoder.
struct intel_shared_dpll *
intel_get_shared_dpll(struct intel_crtc_state *crtc_state,
		      struct intel_encoder *encoder)
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
	/* Platforms without shared DPLL support must not get here. */
	if (WARN_ON(!dpll_mgr))
	/* Delegate selection to the platform-specific manager hook. */
	return dpll_mgr->get_dpll(crtc_state, encoder);
3325 * intel_release_shared_dpll - end use of DPLL by CRTC in atomic state
 * @dpll: dpll in use by @crtc
 * @crtc: crtc releasing its reference to @dpll
 * @state: atomic state
3330 * This function releases the reference from @crtc to @dpll from the
3331 * atomic @state. The new configuration is made effective by calling
3332 * intel_shared_dpll_swap_state().
void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
			       struct intel_crtc *crtc,
			       struct drm_atomic_state *state)
	struct intel_shared_dpll_state *shared_dpll_state;
	shared_dpll_state = intel_atomic_get_shared_dpll_state(state);
	/* Clear this crtc's pipe bit from the PLL's staged crtc_mask. */
	shared_dpll_state[dpll->info->id].crtc_mask &= ~(1 << crtc->pipe);
 * intel_dpll_dump_hw_state - write hw_state to dmesg
3346 * @dev_priv: i915 drm device
3347 * @hw_state: hw state to be written to the log
3349 * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
3351 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
3352 struct intel_dpll_hw_state *hw_state)
3354 if (dev_priv->dpll_mgr) {
3355 dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
3357 /* fallback for platforms that don't use the shared dpll
3360 DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
3361 "fp0: 0x%x, fp1: 0x%x\n",