2 * Copyright © 2006-2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 #include "intel_drv.h"
29 * Display PLLs used for driving outputs vary by platform. While some have
30 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
31 * from a pool. In the latter scenario, it is possible that multiple pipes
32 * share a PLL if their configurations match.
34 * This file provides an abstraction over display PLLs. The function
35 * intel_shared_dpll_init() initializes the PLLs for the given platform. The
36 * users of a PLL are tracked and that tracking is integrated with the atomic
 * modeset interface. During an atomic operation, a PLL can be requested for a
38 * given CRTC and encoder configuration by calling intel_get_shared_dpll() and
39 * a previously used PLL can be released with intel_release_shared_dpll().
40 * Changes to the users are first staged in the atomic state, and then made
41 * effective by calling intel_shared_dpll_swap_state() during the atomic
/* Copy the committed state of every shared DPLL into @shared_dpll. */
intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll_state *shared_dpll)
	/* Copy shared dpll state */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
		shared_dpll[i] = pll->state;
/*
 * Return the staged shared-DPLL state array of atomic state @s,
 * duplicating it from the committed state on first access.
 */
static struct intel_shared_dpll_state *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
	struct intel_atomic_state *state = to_intel_atomic_state(s);
	/* Staging DPLL state requires holding the connection_mutex. */
	WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
	/* Lazily snapshot the committed state into this atomic state once. */
	if (!state->dpll_set) {
		state->dpll_set = true;
		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
	return state->shared_dpll;
/**
 * intel_get_shared_dpll_by_id - get a DPLL given its id
 * @dev_priv: i915 device instance
 *
 * Returns:
 * A pointer to the DPLL with @id
 */
struct intel_shared_dpll *
intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
			    enum intel_dpll_id id)
	/* @id is a direct index into the shared_dplls array. */
	return &dev_priv->shared_dplls[id];
/**
 * intel_get_shared_dpll_id - get the id of a DPLL
 * @dev_priv: i915 device instance
 * @pll: the DPLL whose id is looked up
 */
intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll)
	/*
	 * Sanity-check that @pll points into the shared_dplls array.
	 * NOTE(review): the upper bound compares with '>' against the
	 * one-past-the-end element, so a pointer exactly one past the array
	 * escapes the WARN; '>=' looks intended — confirm.
	 */
	if (WARN_ON(pll < dev_priv->shared_dplls||
		    pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
	/* The id is the PLL's index within the shared_dplls array. */
	return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
/* Warn if @pll's hardware on/off state differs from the expected state. */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
	struct intel_dpll_hw_state hw_state;
	if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
	/* Read the live state from hardware and compare with expectation. */
	cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			pll->info->name, onoff(state), onoff(cur_state));
/**
 * intel_prepare_shared_dpll - call a dpll's prepare hook
 * @crtc_state: CRTC, and its state, which has a shared dpll
 *
 * This calls the PLL's prepare hook if it has one and if the PLL is not
 * already enabled. The prepare hook is platform specific.
 */
void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	if (WARN_ON(pll == NULL))
	mutex_lock(&dev_priv->dpll_lock);
	/* A PLL being prepared must already have at least one user CRTC. */
	WARN_ON(!pll->state.crtc_mask);
	/* Only prepare a PLL that is not yet active on any CRTC. */
	if (!pll->active_mask) {
		DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name);
		assert_shared_dpll_disabled(dev_priv, pll);
		pll->info->funcs->prepare(dev_priv, pll);
	mutex_unlock(&dev_priv->dpll_lock);
/**
 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Enable the shared DPLL used by @crtc.
 */
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
	unsigned int old_mask;
	if (WARN_ON(pll == NULL))
	mutex_lock(&dev_priv->dpll_lock);
	old_mask = pll->active_mask;
	/* The CRTC must be a staged user and must not already be active. */
	if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
	    WARN_ON(pll->active_mask & crtc_mask))
	pll->active_mask |= crtc_mask;
	DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
		      pll->info->name, pll->active_mask, pll->on,
	/* PLL already running for another CRTC: just verify, don't re-enable. */
	assert_shared_dpll_enabled(dev_priv, pll);
	DRM_DEBUG_KMS("enabling %s\n", pll->info->name);
	pll->info->funcs->enable(dev_priv, pll);
	mutex_unlock(&dev_priv->dpll_lock);
/**
 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Disable the shared DPLL used by @crtc.
 */
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
	/* PCH only available on ILK+ */
	if (INTEL_GEN(dev_priv) < 5)
	mutex_lock(&dev_priv->dpll_lock);
	if (WARN_ON(!(pll->active_mask & crtc_mask)))
	DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
		      pll->info->name, pll->active_mask, pll->on,
	assert_shared_dpll_enabled(dev_priv, pll);
	/* Drop this CRTC; the PLL stays on while other CRTCs still use it. */
	pll->active_mask &= ~crtc_mask;
	if (pll->active_mask)
	DRM_DEBUG_KMS("disabling %s\n", pll->info->name);
	pll->info->funcs->disable(dev_priv, pll);
	mutex_unlock(&dev_priv->dpll_lock);
/*
 * Search PLL ids [@range_min, @range_max] for a shared DPLL whose staged hw
 * state matches @crtc_state exactly, falling back to an unused PLL.
 */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
		       enum intel_dpll_id range_min,
		       enum intel_dpll_id range_max)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll, *unused_pll = NULL;
	struct intel_shared_dpll_state *shared_dpll;
	enum intel_dpll_id i;
	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
	for (i = range_min; i <= range_max; i++) {
		pll = &dev_priv->shared_dplls[i];
		/* Only want to check enabled timings first */
		if (shared_dpll[i].crtc_mask == 0) {
		/* A PLL already in use is shareable only on an exact match. */
		if (memcmp(&crtc_state->dpll_hw_state,
			   &shared_dpll[i].hw_state,
			   sizeof(crtc_state->dpll_hw_state)) == 0) {
			DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
				      crtc->base.base.id, crtc->base.name,
				      shared_dpll[i].crtc_mask,
	/* Ok no matching timings, maybe there's a free one? */
	DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
		      crtc->base.base.id, crtc->base.name,
		      unused_pll->info->name);
/*
 * Stage @crtc_state's CRTC as a user of @pll in the atomic DPLL state,
 * recording the desired hw state on first reference.
 */
intel_reference_shared_dpll(struct intel_shared_dpll *pll,
			    struct intel_crtc_state *crtc_state)
	struct intel_shared_dpll_state *shared_dpll;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	const enum intel_dpll_id id = pll->info->id;
	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
	/* First user: record the hw state the PLL will be programmed with. */
	if (shared_dpll[id].crtc_mask == 0)
		shared_dpll[id].hw_state =
			crtc_state->dpll_hw_state;
	crtc_state->shared_dpll = pll;
	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
			 pipe_name(crtc->pipe));
	/* Mark this pipe as a (staged) user of the PLL. */
	shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
/**
 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
 * @state: atomic state
 *
 * This is the dpll version of drm_atomic_helper_swap_state() since the
 * helper does not handle driver-specific global state.
 *
 * For consistency with atomic helpers this function does a complete swap,
 * i.e. it also puts the current state into @state, even though there is no
 * need for that at this moment.
 */
void intel_shared_dpll_swap_state(struct drm_atomic_state *state)
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_shared_dpll_state *shared_dpll;
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;
	/* Nothing to do if this atomic state never staged DPLL changes. */
	if (!to_intel_atomic_state(state)->dpll_set)
	shared_dpll = to_intel_atomic_state(state)->shared_dpll;
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll_state tmp;
		pll = &dev_priv->shared_dplls[i];
		/* Swap: staged state becomes committed, and vice versa. */
		pll->state = shared_dpll[i];
		shared_dpll[i] = tmp;
/* Read the PCH DPLL registers into @hw_state; true if the PLL is enabled. */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	/* Only read registers if the PLL power domain is already up. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
	val = I915_READ(PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = I915_READ(PCH_FP0(id));
	hw_state->fp1 = I915_READ(PCH_FP1(id));
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
	return val & DPLL_VCO_ENABLE;
/* Program the FP0/FP1 divisor registers ahead of enabling the PCH DPLL. */
static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
	const enum intel_dpll_id id = pll->info->id;
	I915_WRITE(PCH_FP0(id), pll->state.hw_state.fp0);
	I915_WRITE(PCH_FP1(id), pll->state.hw_state.fp1);
/* Warn if the PCH reference clock is not running (IBX/CPT PCH only). */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
	val = I915_READ(PCH_DREF_CONTROL);
	/* Any SSC / non-spread / superspread source selection counts as on. */
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
/* Enable a PCH DPLL, then rewrite it once the clocks have stabilized. */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
	const enum intel_dpll_id id = pll->info->id;
	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);
	I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
	/* Wait for the clocks to stabilize. */
	POSTING_READ(PCH_DPLL(id));
	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 */
	I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
	POSTING_READ(PCH_DPLL(id));
/* Disable a PCH DPLL by clearing its control register entirely. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
	const enum intel_dpll_id id = pll->info->id;
	I915_WRITE(PCH_DPLL(id), 0);
	POSTING_READ(PCH_DPLL(id));
/* Select and reference a PCH DPLL for @crtc_state. */
static struct intel_shared_dpll *
ibx_get_dpll(struct intel_crtc_state *crtc_state,
	     struct intel_encoder *encoder)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;
	if (HAS_PCH_IBX(dev_priv)) {
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
		i = (enum intel_dpll_id) crtc->pipe;
		pll = &dev_priv->shared_dplls[i];
		DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
			      crtc->base.base.id, crtc->base.name,
		/* Other PCH types: pick a PLL from the shared pool. */
		pll = intel_find_shared_dpll(crtc_state,
	/* reference the pll */
	intel_reference_shared_dpll(pll, crtc_state);
/* Dump the IBX DPLL hw state fields to the debug log. */
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
			      struct intel_dpll_hw_state *hw_state)
	DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
		      "fp0: 0x%x, fp1: 0x%x\n",
/* Vtable for the IBX/CPT PCH DPLLs. */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.prepare = ibx_pch_dpll_prepare,
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
/* Enable an HSW WRPLL by writing its saved control-register value. */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
	const enum intel_dpll_id id = pll->info->id;
	I915_WRITE(WRPLL_CTL(id), pll->state.hw_state.wrpll);
	POSTING_READ(WRPLL_CTL(id));
/* Enable the HSW SPLL (single instance, so no per-PLL id is needed). */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
	I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
	POSTING_READ(SPLL_CTL);
/* Disable an HSW WRPLL by clearing its enable bit. */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
	const enum intel_dpll_id id = pll->info->id;
	val = I915_READ(WRPLL_CTL(id));
	I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
	POSTING_READ(WRPLL_CTL(id));
/* Disable the HSW SPLL by clearing its enable bit. */
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
	val = I915_READ(SPLL_CTL);
	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
	POSTING_READ(SPLL_CTL);
/* Read a WRPLL's control register; returns true if the PLL is enabled. */
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	/* Skip the read unless the PLL power domain is already enabled. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
	val = I915_READ(WRPLL_CTL(id));
	hw_state->wrpll = val;
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
	return val & WRPLL_PLL_ENABLE;
/* Read the SPLL control register; returns true if the PLL is enabled. */
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
	intel_wakeref_t wakeref;
	/* Skip the read unless the PLL power domain is already enabled. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
	val = I915_READ(SPLL_CTL);
	hw_state->spll = val;
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
	return val & SPLL_PLL_ENABLE;
553 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
559 /* Constraints for PLL good behavior */
565 struct hsw_wrpll_rnp {
569 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
/*
 * Evaluate one candidate WRPLL divider set (r2, n2, p) against the current
 * best, updating @best when the candidate is preferable under @budget (ppm).
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 struct hsw_wrpll_rnp *best)
	u64 a, b, c, d, diff, diff_best;
	/* No best (r,n,p) yet */
	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution. However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	d = 1000000 * diff_best;
	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it? Update. */
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
	/* Otherwise a < c && b >= d, do nothing */
/*
 * Compute WRPLL dividers (R2, N2, P) for @clock by exhaustively scanning the
 * legal (r2, n2, p) space and keeping the best candidate.
 */
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
	struct hsw_wrpll_rnp best = { 0, 0, 0 };
	/* Work in units of 100 Hz ("freq2k" naming follows the math below). */
	freq2k = clock / 100;
	budget = hsw_wrpll_get_budget_for_freq(clock);
	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
/* Compute WRPLL dividers for HDMI and find a WRPLL to carry them. */
static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *crtc_state)
	struct intel_shared_dpll *pll;
	unsigned int p, n2, r2;
	/* port_clock is in kHz; the calculator expects Hz. */
	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
	val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
	      WRPLL_DIVIDER_POST(p);
	crtc_state->dpll_hw_state.wrpll = val;
	pll = intel_find_shared_dpll(crtc_state,
				     DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
/* Map a DP link clock to one of the fixed LCPLL-derived PLL ids. */
static struct intel_shared_dpll *
hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id pll_id;
	int clock = crtc_state->port_clock;
	pll_id = DPLL_ID_LCPLL_810;
	pll_id = DPLL_ID_LCPLL_1350;
	pll_id = DPLL_ID_LCPLL_2700;
	/* No fixed PLL matches this link rate. */
	DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
/* Pick a DPLL for @crtc_state on HSW: WRPLL for HDMI, LCPLL for DP, SPLL for CRT. */
static struct intel_shared_dpll *
hsw_get_dpll(struct intel_crtc_state *crtc_state,
	     struct intel_encoder *encoder)
	struct intel_shared_dpll *pll;
	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		pll = hsw_ddi_hdmi_get_dpll(crtc_state);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		pll = hsw_ddi_dp_get_dpll(crtc_state);
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
		/* CRT only supports the fixed 135 MHz SPLL rate. */
		if (WARN_ON(crtc_state->port_clock / 2 != 135000))
		crtc_state->dpll_hw_state.spll =
			SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
		pll = intel_find_shared_dpll(crtc_state,
					     DPLL_ID_SPLL, DPLL_ID_SPLL);
	intel_reference_shared_dpll(pll, crtc_state);
/* Dump the HSW DPLL hw state fields to the debug log. */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      struct intel_dpll_hw_state *hw_state)
	DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		      hw_state->wrpll, hw_state->spll);
/* Vtable for the HSW WRPLLs (no prepare hook needed). */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
/* Vtable for the HSW SPLL. */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
/*
 * LCPLL hooks. The LCPLL is not controlled through this interface
 * (presumably managed elsewhere — bodies are elided in this view).
 */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
/* Vtable for the fixed HSW LCPLL. */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
/* Register set describing one SKL DPLL. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	/* DPLL 0 doesn't support HDMI mode */
		/* DPLL 1 */
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
		/* DPLL 2 */
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
		/* DPLL 3 */
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
/* Update this PLL's 6-bit field inside the shared DPLL_CTRL1 register. */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
	const enum intel_dpll_id id = pll->info->id;
	val = I915_READ(DPLL_CTRL1);
	/* Clear this PLL's mode and link-rate bits before inserting ours. */
	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
		 DPLL_CTRL1_LINK_RATE_MASK(id));
	/* Each PLL owns 6 consecutive bits, hence the id * 6 shift. */
	val |= pll->state.hw_state.ctrl1 << (id * 6);
	I915_WRITE(DPLL_CTRL1, val);
	POSTING_READ(DPLL_CTRL1);
/* Program and enable a SKL DPLL, then wait for it to report lock. */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
	I915_WRITE(regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	I915_WRITE(regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	POSTING_READ(regs[id].cfgcr1);
	POSTING_READ(regs[id].cfgcr2);
	/* the enable bit is always bit 31 */
	I915_WRITE(regs[id].ctl,
		   I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
	if (intel_wait_for_register(dev_priv,
		DRM_ERROR("DPLL %d not locked\n", id);
/* DPLL0 is always running (it drives CDCLK); only ctrl1 needs updating. */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
/* Disable a SKL DPLL by clearing its enable bit. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	/* the enable bit is always bit 31 */
	I915_WRITE(regs[id].ctl,
		   I915_READ(regs[id].ctl) & ~LCPLL_PLL_ENABLE);
	POSTING_READ(regs[id].ctl);
/* DPLL0 drives CDCLK and must never be turned off here; intentional no-op. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
/* Read a SKL DPLL's registers into @hw_state; true if the PLL is enabled. */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	/* Skip the reads unless the PLL power domain is already enabled. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
	val = I915_READ(regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
	val = I915_READ(DPLL_CTRL1);
	/* Extract this PLL's 6-bit slice of DPLL_CTRL1. */
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = I915_READ(regs[id].cfgcr1);
		hw_state->cfgcr2 = I915_READ(regs[id].cfgcr2);
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
/* Read DPLL0's state; it is expected to always be enabled. */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	/* Skip the reads unless the PLL power domain is already enabled. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
	/* DPLL0 is always enabled since it drives CDCLK */
	val = I915_READ(regs[id].ctl);
	if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
	val = I915_READ(DPLL_CTRL1);
	/* Extract this PLL's 6-bit slice of DPLL_CTRL1. */
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
/* Running best-candidate state for the SKL WRPLL divider search. */
struct skl_wrpll_context {
	u64 min_deviation; /* current minimal deviation */
	u64 central_freq; /* chosen central freq */
	u64 dco_freq; /* chosen dco freq */
	unsigned int p; /* chosen divider */
/* Reset the divider search state; start with the worst possible deviation. */
static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
	memset(ctx, 0, sizeof(*ctx));
	ctx->min_deviation = U64_MAX;
1078 /* DCO freq must be within +1%/-6% of the DCO central freq */
1079 #define SKL_DCO_MAX_PDEVIATION 100
1080 #define SKL_DCO_MAX_NDEVIATION 600
/*
 * Consider one candidate divider: keep it if its DCO frequency deviation
 * from the central frequency is within limits and beats the current best.
 */
static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
				  unsigned int divider)
	/* Deviation in units of 0.01% relative to the central frequency. */
	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
	/* positive deviation */
	if (dco_freq >= central_freq) {
		if (deviation < SKL_DCO_MAX_PDEVIATION &&
		    deviation < ctx->min_deviation) {
			ctx->min_deviation = deviation;
			ctx->central_freq = central_freq;
			ctx->dco_freq = dco_freq;
	/* negative deviation */
	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
		   deviation < ctx->min_deviation) {
		ctx->min_deviation = deviation;
		ctx->central_freq = central_freq;
		ctx->dco_freq = dco_freq;
/*
 * Decompose overall divider @p into the hardware P0/P1/P2 multipliers.
 * Even dividers are factored via p/2; the remaining branches handle the
 * odd dividers (3, 5, 7, 9, 15, 21, 35) as special cases.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
	unsigned int half = p / 2;
	if (half == 1 || half == 2 || half == 3 || half == 5) {
	} else if (half % 2 == 0) {
	} else if (half % 3 == 0) {
	} else if (half % 7 == 0) {
	} else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
	} else if (p == 5 || p == 7) {
	} else if (p == 15) {
	} else if (p == 21) {
	} else if (p == 35) {
1160 struct skl_wrpll_params {
/* Translate chosen frequencies and multipliers into register-field values. */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 p0, u32 p1, u32 p2)
	/* Encode the central frequency selection. */
	switch (central_freq) {
		params->central_freq = 0;
		params->central_freq = 1;
		params->central_freq = 3;
		WARN(1, "Incorrect PDiv\n");
		WARN(1, "Incorrect KDiv\n");
	params->qdiv_ratio = p1;
	/* qdiv is bypassed when the ratio is 1. */
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
	dco_freq = p0 * p1 * p2 * afe_clock;
	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bsepc
	 */
	params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
	params->dco_fraction =
		div_u64((div_u64(dco_freq, 24) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
/*
 * Find WRPLL parameters for @clock: scan even then odd dividers across the
 * candidate DCO central frequencies, keeping the minimal-deviation match.
 */
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			struct skl_wrpll_params *wrpll_params)
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
	u64 dco_central_freq[3] = { 8400000000ULL,
	static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
					     24, 28, 30, 32, 36, 40, 42, 44,
					     48, 52, 54, 56, 60, 64, 66, 68,
					     70, 72, 76, 78, 80, 84, 88, 90,
	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	struct skl_wrpll_context ctx;
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;
	skl_wrpll_context_init(&ctx);
	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;
				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * it over any odd-divider solution.
		 */
		if (d == 0 && ctx.p)
	DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
/* Compute WRPLL parameters for HDMI and store them in @crtc_state. */
static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
	u32 ctrl1, cfgcr1, cfgcr2;
	struct skl_wrpll_params wrpll_params = { 0, };
	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
	/* port_clock is in kHz; the calculator expects Hz. */
	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		 wrpll_params.dco_integer;
	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		 wrpll_params.central_freq;
	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));
	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
/* Encode the DP link rate into ctrl1 and store it in @crtc_state. */
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	/* port_clock is in kHz; halved to match the link-rate encoding. */
	switch (crtc_state->port_clock / 2) {
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));
	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
/* Compute the hw state for @crtc_state's output type and pick a SKL DPLL. */
static struct intel_shared_dpll *
skl_get_dpll(struct intel_crtc_state *crtc_state,
	     struct intel_encoder *encoder)
	struct intel_shared_dpll *pll;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
	/* eDP is restricted to a different PLL range than other outputs. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
		pll = intel_find_shared_dpll(crtc_state,
		pll = intel_find_shared_dpll(crtc_state,
	intel_reference_shared_dpll(pll, crtc_state);
/* Dump the SKL DPLL hw state fields to the debug log. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      struct intel_dpll_hw_state *hw_state)
	DRM_DEBUG_KMS("dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
/* Vtable for SKL DPLL1-3. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
/* Vtable for SKL DPLL0 (shared with CDCLK, hence the special hooks). */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
/*
 * Enable a BXT/GLK port PLL: program the divider and loop-filter values
 * cached in pll->state.hw_state into the PHY registers, trigger a
 * recalibration, enable the PLL and wait for it to report lock.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_channel ch;
	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
	/* Non-SSC reference */
	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
	/* GLK additionally requires PLL power to be up before programming */
	if (IS_GEMINILAKE(dev_priv)) {
		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
		if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			DRM_ERROR("Power state not set for PLL:%d\n", port);
	/* Disable 10 bit clock */
	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
	/* Write P1/P2 dividers */
	temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
	/* Write M2 integer */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_MASK;
	temp |= pll->state.hw_state.pll0;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
	/* Write N divider */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
	/* Write M2 fraction */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
	/* Write M2 fraction enable */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
	/* Loop-filter proportional/integral coefficients and gain control */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
	/* Write calibration val */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
	/* Lock threshold */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
	/* DCO amplitude override and value */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
	/* Recalibrate with new settings */
	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
	/* Enable the PLL and wait for lock */
	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
	POSTING_READ(BXT_PORT_PLL_ENABLE(port));
	if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
		DRM_ERROR("PLL %d not locked\n", port);
	/* GLK: widen the DCC delay range on the TX lanes */
	if (IS_GEMINILAKE(dev_priv)) {
		temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
/*
 * Disable a BXT/GLK port PLL; on GLK also drop the PLL power well and
 * wait for the power state bit to clear.
 */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
	POSTING_READ(BXT_PORT_PLL_ENABLE(port)); /* flush before power-down */
	if (IS_GEMINILAKE(dev_priv)) {
		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
		if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			DRM_ERROR("Power state not reset for PLL:%d\n", port);
/*
 * Read back the current hardware state of a BXT/GLK port PLL into
 * @hw_state, masking each register down to the bits this driver programs.
 * Returns false if the power domain is off or the PLL is not enabled.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_channel ch;
	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
	/* bail out (returning false) if the PLL power domain is down */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
	val = I915_READ(BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
	hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
	hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_MASK;
	hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;
	hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
	hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
	hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;
	hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
	hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
	hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
				 I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
/* bxt clock parameters */
struct bxt_clk_div {
/*
 * pre-calculated values for DP linkrates
 * NOTE(review): initializer order must match the struct bxt_clk_div field
 * order (clock, p1, p2, m2_int, m2_frac, m2_frac_en, n) -- confirm against
 * the struct definition above.
 */
static const struct bxt_clk_div bxt_dp_clk_val[] = {
	{162000, 4, 2, 32, 1677722, 1, 1},
	{270000, 4, 1, 27, 0, 0, 1},
	{540000, 2, 1, 27, 0, 0, 1},
	{216000, 3, 2, 32, 1677722, 1, 1},
	{243000, 4, 1, 24, 1258291, 1, 1},
	{324000, 4, 1, 32, 1677722, 1, 1},
	{432000, 3, 1, 32, 1677722, 1, 1}
/*
 * Compute BXT PLL dividers for an HDMI port clock using the generic divider
 * search, then unpack the result into @clk_div. Fails (logging the clock and
 * pipe) if no valid dividers exist for the requested rate.
 */
bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
			  struct bxt_clk_div *clk_div)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct dpll best_clock;
	/* Calculate HDMI div */
	/*
	 * FIXME: tie the following calculation into
	 * i9xx_crtc_compute_clock
	 */
	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
		DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
				 crtc_state->port_clock,
				 pipe_name(crtc->pipe));
	clk_div->p1 = best_clock.p1;
	clk_div->p2 = best_clock.p2;
	WARN_ON(best_clock.m1 != 2); /* m1 is fixed at 2 on this PHY */
	clk_div->n = best_clock.n;
	clk_div->m2_int = best_clock.m2 >> 22; /* m2 is 10.22 fixed point */
	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
	clk_div->m2_frac_en = clk_div->m2_frac != 0;
	clk_div->vco = best_clock.vco;
/*
 * Look up the pre-computed DP divider entry matching the port clock
 * (defaulting to the first table entry) and derive the VCO frequency.
 */
static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
				    struct bxt_clk_div *clk_div)
	int clock = crtc_state->port_clock;
	*clk_div = bxt_dp_clk_val[0]; /* fallback if no exact match below */
	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
		if (bxt_dp_clk_val[i].clock == clock) {
			*clk_div = bxt_dp_clk_val[i];
	/* vco = symbol clock (link rate * 10 / 2) scaled by P1 * P2 */
	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
/*
 * Translate the computed dividers into the register-level PLL state stored
 * in crtc_state->dpll_hw_state. Loop-filter coefficients are picked per VCO
 * band and lane staggering per port clock range.
 */
static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				      const struct bxt_clk_div *clk_div)
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));
	/* select loop-filter coefficients by VCO frequency band */
	if (vco >= 6200000 && vco <= 6700000) {
	} else if ((vco > 5400000 && vco < 6200000) ||
		   (vco >= 4800000 && vco < 5400000)) {
	} else if (vco == 5400000) {
		DRM_ERROR("Invalid VCO\n");
	/* lane staggering delay scales with port clock */
	else if (clock > 135000)
	else if (clock > 67000)
	else if (clock > 33000)
	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = clk_div->m2_int;
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = clk_div->m2_frac;
	if (clk_div->m2_frac_en)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
	dpll_hw_state->pll8 = targ_cnt;
	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
	dpll_hw_state->pll10 =
		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
		| PORT_PLL_DCO_AMP_OVR_EN_H;
	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
/* Compute and store the PLL state for a DP output. */
bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
	struct bxt_clk_div clk_div = {};
	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);

/* Compute and store the PLL state for an HDMI output. */
bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
	struct bxt_clk_div clk_div = {};
	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
/*
 * Select the shared DPLL for a BXT CRTC. Ports and PLLs map 1:1 here, so
 * after computing the HDMI or DP hw state this simply picks the PLL with
 * the encoder's port id and takes a reference on it in the atomic state.
 */
static struct intel_shared_dpll *
bxt_get_dpll(struct intel_crtc_state *crtc_state,
	     struct intel_encoder *encoder)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
	if (intel_crtc_has_dp_encoder(crtc_state) &&
	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
	/* 1:1 mapping between ports and PLLs */
	id = (enum intel_dpll_id) encoder->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, id);
	DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
		      crtc->base.base.id, crtc->base.name, pll->info->name);
	intel_reference_shared_dpll(pll, crtc_state);
1857 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
1858 struct intel_dpll_hw_state *hw_state)
1860 DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
1861 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
1862 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
/* Callbacks shared by all BXT/GLK port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
/*
 * Sanity-check the BIOS-programmed LCPLL on pre-gen9 DDI platforms.
 * We never try to enable it ourselves -- only report misconfiguration.
 */
static void intel_ddi_pll_init(struct drm_device *dev)
	struct drm_i915_private *dev_priv = to_i915(dev);
	if (INTEL_GEN(dev_priv) < 9) {
		u32 val = I915_READ(LCPLL_CTL);
		 * The LCPLL register should be turned on by the BIOS. For now
		 * let's just check its state and print errors in case
		 * something is wrong. Don't even try to turn it on.
		if (val & LCPLL_CD_SOURCE_FCLK)
			DRM_ERROR("CDCLK source is not LCPLL\n");
		if (val & LCPLL_PLL_DISABLE)
			DRM_ERROR("LCPLL is disabled\n");
/*
 * Per-platform DPLL manager: the table of available PLLs plus the hooks
 * used to pick a PLL for a CRTC/encoder pair and to dump hw state.
 */
struct intel_dpll_mgr {
	const struct dpll_info *dpll_info;
	struct intel_shared_dpll *(*get_dpll)(struct intel_crtc_state *crtc_state,
					      struct intel_encoder *encoder);
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      struct intel_dpll_hw_state *hw_state);

/* IBX/CPT: two PCH DPLLs. */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },

static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.get_dpll = ibx_get_dpll,
	.dump_hw_state = ibx_dump_hw_state,

/* HSW/BDW: WRPLLs and SPLL plus the fixed-frequency, always-on LCPLLs. */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
	{ "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
	{ "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
	{ "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },

static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.get_dpll = hsw_get_dpll,
	.dump_hw_state = hsw_dump_hw_state,

/* SKL: DPLL0 feeds CDCLK and stays on; DPLL1-3 are freely assignable. */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },

static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.get_dpll = skl_get_dpll,
	.dump_hw_state = skl_dump_hw_state,

/* BXT/GLK: one dedicated PLL per port (A-C), reusing the SKL id space. */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },

static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.get_dpll = bxt_get_dpll,
	.dump_hw_state = bxt_dump_hw_state,
/*
 * Enable a CNL DPLL following the bspec sequence: power up, program
 * CFGCR0/CFGCR1 from the cached state, enable and wait for lock. DVFS
 * steps are handled by the cdclk code, not here.
 */
static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
	const enum intel_dpll_id id = pll->info->id;
	/* 1. Enable DPLL power in DPLL_ENABLE. */
	val = I915_READ(CNL_DPLL_ENABLE(id));
	val |= PLL_POWER_ENABLE;
	I915_WRITE(CNL_DPLL_ENABLE(id), val);
	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
	if (intel_wait_for_register(dev_priv,
				    CNL_DPLL_ENABLE(id),
		DRM_ERROR("PLL %d Power not enabled\n", id);
	 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
	 * select DP mode, and set DP link rate.
	val = pll->state.hw_state.cfgcr0;
	I915_WRITE(CNL_DPLL_CFGCR0(id), val);
	/* 4. Read back to ensure writes completed */
	POSTING_READ(CNL_DPLL_CFGCR0(id));
	/* Configure DPLL_CFGCR1 (HDMI mode only) */
	/* Avoid touch CFGCR1 if HDMI mode is not enabled */
	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
		val = pll->state.hw_state.cfgcr1;
		I915_WRITE(CNL_DPLL_CFGCR1(id), val);
		/* Read back to ensure writes completed */
		POSTING_READ(CNL_DPLL_CFGCR1(id));
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	/* 6. Enable DPLL in DPLL_ENABLE. */
	val = I915_READ(CNL_DPLL_ENABLE(id));
	I915_WRITE(CNL_DPLL_ENABLE(id), val);
	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
	if (intel_wait_for_register(dev_priv,
				    CNL_DPLL_ENABLE(id),
		DRM_ERROR("PLL %d not locked\n", id);
	 * 8. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 * 9. turn on the clock for the DDI and map the DPLL to the DDI
	 * Done at intel_ddi_clk_select
/*
 * Disable a CNL DPLL per the bspec sequence: PLL disable, wait for
 * unlock, then drop PLL power and wait for the power state to clear.
 * DDI clock gating and DVFS are handled elsewhere.
 */
static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
	const enum intel_dpll_id id = pll->info->id;
	 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
	 * Done at intel_ddi_post_disable
	 * 2. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	/* 3. Disable DPLL through DPLL_ENABLE. */
	val = I915_READ(CNL_DPLL_ENABLE(id));
	I915_WRITE(CNL_DPLL_ENABLE(id), val);
	/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
	if (intel_wait_for_register(dev_priv,
				    CNL_DPLL_ENABLE(id),
		DRM_ERROR("PLL %d locked\n", id);
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	/* 6. Disable DPLL power in DPLL_ENABLE. */
	val = I915_READ(CNL_DPLL_ENABLE(id));
	val &= ~PLL_POWER_ENABLE;
	I915_WRITE(CNL_DPLL_ENABLE(id), val);
	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
	if (intel_wait_for_register(dev_priv,
				    CNL_DPLL_ENABLE(id),
		DRM_ERROR("PLL %d Power not disabled\n", id);
/*
 * Read back the CNL DPLL configuration (CFGCR0, and CFGCR1 only in HDMI
 * mode) into @hw_state. Returns false if the power domain is off or the
 * PLL is disabled.
 */
static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	/* bail out (returning false) if the PLL power domain is down */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
	val = I915_READ(CNL_DPLL_ENABLE(id));
	if (!(val & PLL_ENABLE))
	val = I915_READ(CNL_DPLL_CFGCR0(id));
	hw_state->cfgcr0 = val;
	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CFGCR0_HDMI_MODE) {
		hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(id));
	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
/*
 * Factor the chosen overall divider into the hardware P (pdiv), Q (qdiv)
 * and K (kdiv) fields. Even dividers are split across Q/K; the remaining
 * odd dividers (3, 5, 7, 9, 15, 21) are expressed through P.
 */
static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
	if (bestdiv % 2 == 0) {
	} else if (bestdiv % 4 == 0) {
		*qdiv = bestdiv / 4;
	} else if (bestdiv % 6 == 0) {
		*qdiv = bestdiv / 6;
	} else if (bestdiv % 5 == 0) {
		*qdiv = bestdiv / 10;
	} else if (bestdiv % 14 == 0) {
		*qdiv = bestdiv / 14;
	/* odd dividers */
	if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
	} else { /* 9, 15, 21 */
		*pdiv = bestdiv / 3;
/*
 * Encode pdiv/qdiv/kdiv and the DCO ratio into register-format
 * skl_wrpll_params. The DCO ratio is computed as a 15-bit fixed-point
 * fraction of dco_freq over ref_freq.
 */
static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 dco_freq, u32 ref_freq,
				      int pdiv, int qdiv, int kdiv)
		WARN(1, "Incorrect KDiv\n");
		WARN(1, "Incorrect PDiv\n");
	WARN_ON(kdiv != 2 && qdiv != 1); /* Q divider only valid with K = 2 */
	params->qdiv_ratio = qdiv;
	params->qdiv_mode = (qdiv == 1) ? 0 : 1;
	/* 15.15 fixed point: integer part and 15-bit fraction */
	dco = div_u64((u64)dco_freq << 15, ref_freq);
	params->dco_integer = dco >> 15;
	params->dco_fraction = dco & 0x7fff;
/* Return the reference clock (kHz) to use for HDMI WRPLL calculations. */
int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
	int ref_clock = dev_priv->cdclk.hw.ref;
	 * For ICL+, the spec states: if reference frequency is 38.4,
	 * use 19.2 because the DPLL automatically divides that by 2.
	if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
/*
 * Find WRPLL parameters for an HDMI clock: scan the candidate dividers
 * for the DCO frequency (afe_clock * divider) closest to the middle of
 * the valid DCO range, then split the winning divider into P/Q/K fields.
 */
cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
			struct skl_wrpll_params *wrpll_params)
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	u32 afe_clock = crtc_state->port_clock * 5; /* AFE clock is 5x port clock */
	u32 dco_min = 7998000;
	u32 dco_max = 10000000;
	u32 dco_mid = (dco_min + dco_max) / 2;
	/* even dividers first, then the permitted odd ones */
	static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
					18, 20, 24, 28, 30, 32, 36, 40,
					42, 44, 48, 50, 52, 54, 56, 60,
					64, 66, 68, 70, 72, 76, 78, 80,
					84, 88, 90, 92, 96, 98, 100, 102,
					3, 5, 7, 9, 15, 21 };
	u32 dco, best_dco = 0, dco_centrality = 0;
	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		dco = afe_clock * dividers[d];
		if ((dco <= dco_max) && (dco >= dco_min)) {
			dco_centrality = abs(dco - dco_mid);
			if (dco_centrality < best_dco_centrality) {
				best_dco_centrality = dco_centrality;
				best_div = dividers[d];
	cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
	ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
	cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
/*
 * Compute WRPLL parameters for HDMI and store them as CFGCR0/CFGCR1
 * values in crtc_state->dpll_hw_state. Returns false if no valid WRPLL
 * configuration exists for the port clock.
 */
static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
	struct skl_wrpll_params wrpll_params = { 0, };
	cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
	if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
	cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
		  wrpll_params.dco_integer;
	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		 DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
		 DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
		 DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
		 DPLL_CFGCR1_CENTRAL_FREQ;
	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));
	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
/*
 * Build the CFGCR0 value for a DP output: SSC enabled plus the link-rate
 * select field matching the port clock (link rate = port_clock / 2).
 */
cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
			     struct intel_dpll_hw_state *dpll_hw_state)
	cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
	switch (crtc_state->port_clock / 2) {
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
		/* Some SKUs may require elevated I/O voltage to support this */
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
		/* Some SKUs may require elevated I/O voltage to support this */
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
	dpll_hw_state->cfgcr0 = cfgcr0;
/*
 * Select a shared DPLL for a CNL CRTC: compute the HDMI or DP hw state,
 * then search the PLL pool for a free/compatible PLL and reference it.
 * Returns NULL if the state cannot be computed or no PLL is available.
 */
static struct intel_shared_dpll *
cnl_get_dpll(struct intel_crtc_state *crtc_state,
	     struct intel_encoder *encoder)
	struct intel_shared_dpll *pll;
	struct intel_dpll_hw_state dpll_hw_state;
	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state, &dpll_hw_state);
			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
		crtc_state->dpll_hw_state = dpll_hw_state;
		/* neither HDMI nor DP: nothing to program */
		DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
			      crtc_state->output_types);
	pll = intel_find_shared_dpll(crtc_state,
		DRM_DEBUG_KMS("No PLL selected\n");
	intel_reference_shared_dpll(pll, crtc_state);
/* Dump the cached CNL DPLL register state (cfgcr0/cfgcr1) to the log. */
static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
			      struct intel_dpll_hw_state *hw_state)
	DRM_DEBUG_KMS("dpll_hw_state: "
		      "cfgcr0: 0x%x, cfgcr1: 0x%x\n",

/* Callbacks shared by all CNL DPLLs. */
static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
	.enable = cnl_ddi_pll_enable,
	.disable = cnl_ddi_pll_disable,
	.get_hw_state = cnl_ddi_pll_get_hw_state,

/* CNL: three freely-assignable DPLLs, reusing the SKL id space. */
static const struct dpll_info cnl_plls[] = {
	{ "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },

static const struct intel_dpll_mgr cnl_pll_mgr = {
	.dpll_info = cnl_plls,
	.get_dpll = cnl_get_dpll,
	.dump_hw_state = cnl_dump_hw_state,
 * These values are already adjusted: they're the bits we write to the
 * registers, not the logical values.
 */
static const struct skl_wrpll_params icl_dp_combo_pll_24MHz_values[] = {
	{ .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
	  .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
	{ .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
	  .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
	{ .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
	  .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
	{ .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
	  .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
	{ .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	  .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2},
	{ .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	  .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
	{ .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
	  .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
	{ .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
	  .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},

/* Also used for 38.4 MHz values. */
static const struct skl_wrpll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	  .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
	{ .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	  .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
	{ .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	  .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
	{ .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	  .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
	{ .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	  .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2},
	{ .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	  .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
	{ .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	  .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
	{ .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	  .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},

/* Fixed TBT PLL parameters, one set per supported reference clock. */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,

static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
/*
 * Select the pre-computed combo-PHY DP PLL parameters for @clock, using
 * the table matching the current reference clock (24 MHz vs 19.2/38.4).
 * Returns false for an unsupported link clock.
 */
static bool icl_calc_dp_combo_pll(struct drm_i915_private *dev_priv, int clock,
				  struct skl_wrpll_params *pll_params)
	const struct skl_wrpll_params *params;
	params = dev_priv->cdclk.hw.ref == 24000 ?
			icl_dp_combo_pll_24MHz_values :
			icl_dp_combo_pll_19_2MHz_values;
		*pll_params = params[0];
		*pll_params = params[1];
		*pll_params = params[2];
		*pll_params = params[3];
		*pll_params = params[4];
		*pll_params = params[5];
		*pll_params = params[6];
		*pll_params = params[7];
		MISSING_CASE(clock);
/* TBT PLL parameters are fixed per reference clock; just copy them out. */
static bool icl_calc_tbt_pll(struct drm_i915_private *dev_priv, int clock,
			     struct skl_wrpll_params *pll_params)
	*pll_params = dev_priv->cdclk.hw.ref == 24000 ?
			icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;
/*
 * Compute the ICL combo/TBT PLL state: pick the parameter source (fixed
 * TBT values for TC ports, computed WRPLL for HDMI/DSI, DP table
 * otherwise) and encode it into CFGCR0/CFGCR1.
 */
static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
				struct intel_encoder *encoder, int clock,
				struct intel_dpll_hw_state *pll_state)
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct skl_wrpll_params pll_params = { 0 };
	if (intel_port_is_tc(dev_priv, encoder->port))
		ret = icl_calc_tbt_pll(dev_priv, clock, &pll_params);
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
		 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
	else
		ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params);
	cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params.dco_fraction) |
		 pll_params.dco_integer;
	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
		 DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
		 DPLL_CFGCR1_KDIV(pll_params.kdiv) |
		 DPLL_CFGCR1_PDIV(pll_params.pdiv) |
		 DPLL_CFGCR1_CENTRAL_FREQ_8400;
	pll_state->cfgcr0 = cfgcr0;
	pll_state->cfgcr1 = cfgcr1;
/*
 * Reverse-map a programmed combo-PHY DP PLL to its link clock: read back
 * CFGCR0/CFGCR1, find the matching entry in the DP parameter table, then
 * map that table index to the link rate it encodes.
 */
int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
	u32 pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction;
	const struct skl_wrpll_params *params;
	int index, n_entries, link_clock;
	/* Read back values from DPLL CFGCR registers */
	cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
	cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
	dco_integer = cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK;
	dco_fraction = (cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
		DPLL_CFGCR0_DCO_FRACTION_SHIFT;
	pdiv = (cfgcr1 & DPLL_CFGCR1_PDIV_MASK) >> DPLL_CFGCR1_PDIV_SHIFT;
	kdiv = (cfgcr1 & DPLL_CFGCR1_KDIV_MASK) >> DPLL_CFGCR1_KDIV_SHIFT;
	qdiv_mode = (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) >>
		DPLL_CFGCR1_QDIV_MODE_SHIFT;
	qdiv_ratio = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
		DPLL_CFGCR1_QDIV_RATIO_SHIFT;
	params = dev_priv->cdclk.hw.ref == 24000 ?
		icl_dp_combo_pll_24MHz_values :
		icl_dp_combo_pll_19_2MHz_values;
	/* both tables have the same number of entries */
	n_entries = ARRAY_SIZE(icl_dp_combo_pll_24MHz_values);
	for (index = 0; index < n_entries; index++) {
		if (dco_integer == params[index].dco_integer &&
		    dco_fraction == params[index].dco_fraction &&
		    pdiv == params[index].pdiv &&
		    kdiv == params[index].kdiv &&
		    qdiv_mode == params[index].qdiv_mode &&
		    qdiv_ratio == params[index].qdiv_ratio)
	/* Map PLL Index to Link Clock */
		MISSING_CASE(index);
		link_clock = 540000;
		link_clock = 270000;
		link_clock = 162000;
		link_clock = 324000;
		link_clock = 216000;
		link_clock = 432000;
		link_clock = 648000;
		link_clock = 810000;
/* MG PLL ids are contiguous starting at MGPLL1, one per Type-C port. */
static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
	return id - DPLL_ID_ICL_MGPLL1;

/* Inverse of icl_pll_id_to_tc_port(). */
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
	return tc_port + DPLL_ID_ICL_MGPLL1;
/*
 * Search for MG PHY PLL dividers: try each div1 value against div2 from
 * 10 down to 1 until div1 * div2 * symbol clock lands the DCO inside the
 * valid range, then fill in the CLKTOP2 register fields accordingly.
 * Returns false if no divider pair produces a valid DCO frequency.
 */
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
				     u32 *target_dco_khz,
				     struct intel_dpll_hw_state *state)
	u32 dco_min_freq, dco_max_freq;
	int div1_vals[] = {7, 5, 3, 2};
	/* DP uses a fixed 8.1 GHz DCO; HDMI allows a range */
	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
	dco_max_freq = is_dp ? 8100000 : 10000000;
	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
		int div1 = div1_vals[i];
		for (div2 = 10; div2 > 0; div2--) {
			int dco = div1 * div2 * clock_khz * 5;
			int a_divratio, tlinedrv, inputsel;
			if (dco < dco_min_freq || dco > dco_max_freq)
			a_divratio = is_dp ? 10 : 5;
			inputsel = is_dp ? 0 : 1;
			/* map div1 to the HSDIV ratio field encoding */
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
			*target_dco_khz = dco;
			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
			state->mg_clktop2_coreclkctl1 =
				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
			state->mg_clktop2_hsclkctl =
				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
2710 * The specification for this function uses real numbers, so the math had to be
2711 * adapted to integer-only calculation, that's why it looks so different.
/*
 * Compute the MG PHY PLL (ICL Type-C port) register values for the requested
 * port clock and store them in @pll_state.  Returns false when no valid
 * divisor combination or 8-bit feedback divider can be found.
 * NOTE(review): several original lines (closing braces, the refclk switch
 * cases, SSC branches) are missing from this extracted view.
 */
2713 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2714 struct intel_encoder *encoder, int clock,
2715 struct intel_dpll_hw_state *pll_state)
2717 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
/* Reference clock in kHz taken from the current cdclk HW state. */
2718 int refclk_khz = dev_priv->cdclk.hw.ref;
2719 u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2720 u32 iref_ndiv, iref_trim, iref_pulse_w;
2721 u32 prop_coeff, int_coeff;
2722 u32 tdc_targetcnt, feedfwgain;
2723 u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2725 bool use_ssc = false;
/* Any non-HDMI output is treated as DP for divisor selection. */
2726 bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2728 if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2730 DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
/* Integer part of the feedback divider; must fit in 8 bits. */
2735 m2div_int = dco_khz / (refclk_khz * m1div);
2736 if (m2div_int > 255) {
/*
 * NOTE(review): presumably m1div is adjusted before this retry — the
 * adjusting statement is not visible in this view; confirm against source.
 */
2738 m2div_int = dco_khz / (refclk_khz * m1div);
2739 if (m2div_int > 255) {
2740 DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
/* Fractional part of the feedback divider, scaled to 1/2^22 units. */
2745 m2div_rem = dco_khz % (refclk_khz * m1div);
2747 tmp = (u64)m2div_rem * (1 << 22);
2748 do_div(tmp, refclk_khz * m1div);
/* Reference-clock dependent iref_* parameters (cases not visible here). */
2751 switch (refclk_khz) {
2768 MISSING_CASE(refclk_khz);
2773 * tdc_res = 0.000003
2774 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2776 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2777 * was supposed to be a division, but we rearranged the operations of
2778 * the formula to avoid early divisions so we don't multiply the
2781 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2782 * we also rearrange to work with integers.
2784 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2785 * last division by 10.
2787 tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2790 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2791 * 32 bits. That's not a problem since we round the division down
/* Feed-forward gain is only needed for fractional (or SSC) operation. */
2794 feedfwgain = (use_ssc || m2div_rem > 0) ?
2795 m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2797 if (dco_khz >= 9000000) {
2806 tmp = (u64)dco_khz * 47 * 32;
2807 do_div(tmp, refclk_khz * m1div * 10000);
2810 tmp = (u64)dco_khz * 1000;
2811 ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
/* Assemble the MG PLL register images from the values computed above. */
2818 pll_state->mg_pll_div0 = (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2819 MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2820 MG_PLL_DIV0_FBDIV_INT(m2div_int);
2822 pll_state->mg_pll_div1 = MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2823 MG_PLL_DIV1_DITHER_DIV_2 |
2824 MG_PLL_DIV1_NDIVRATIO(1) |
2825 MG_PLL_DIV1_FBPREDIV(m1div);
2827 pll_state->mg_pll_lf = MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2828 MG_PLL_LF_AFCCNTSEL_512 |
2829 MG_PLL_LF_GAINCTRL(1) |
2830 MG_PLL_LF_INT_COEFF(int_coeff) |
2831 MG_PLL_LF_PROP_COEFF(prop_coeff);
2833 pll_state->mg_pll_frac_lock = MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
2834 MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
2835 MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
2836 MG_PLL_FRAC_LOCK_DCODITHEREN |
2837 MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
2838 if (use_ssc || m2div_rem > 0)
2839 pll_state->mg_pll_frac_lock |= MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
2841 pll_state->mg_pll_ssc = (use_ssc ? MG_PLL_SSC_EN : 0) |
2842 MG_PLL_SSC_TYPE(2) |
2843 MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
2844 MG_PLL_SSC_STEPNUM(ssc_steplog) |
2846 MG_PLL_SSC_STEPSIZE(ssc_stepsize);
2848 pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART |
2849 MG_PLL_TDC_COLDST_IREFINT_EN |
2850 MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
2851 MG_PLL_TDC_TDCOVCCORR_EN |
2852 MG_PLL_TDC_TDCSEL(3);
2854 pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
2855 MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
2856 MG_PLL_BIAS_BIAS_BONUS(10) |
2857 MG_PLL_BIAS_BIASCAL_EN |
2858 MG_PLL_BIAS_CTRIM(12) |
2859 MG_PLL_BIAS_VREF_RDAC(4) |
2860 MG_PLL_BIAS_IREFTRIM(iref_trim);
/*
 * With a 38.4 MHz refclk only COLDSTART of the tdc_coldst_bias register
 * (and nothing of the bias register) is relevant for state comparison;
 * the masks are consumed by icl_mg_pll_write() and readout.
 */
2862 if (refclk_khz == 38400) {
2863 pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
2864 pll_state->mg_pll_bias_mask = 0;
2866 pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
2867 pll_state->mg_pll_bias_mask = -1U;
2870 pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask;
2871 pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
/*
 * Pick and reference a DPLL for an ICL CRTC/encoder pair.
 *
 * Combo-PHY ports use DPLL0/DPLL1; Type-C ports use the TBT PLL when the
 * port is in Thunderbolt mode, otherwise the per-port MG PLL.  The computed
 * PLL state is stored in @crtc_state before the shared-DPLL search so the
 * matcher can compare against it.  Returns NULL on failure.
 */
2876 static struct intel_shared_dpll *
2877 icl_get_dpll(struct intel_crtc_state *crtc_state,
2878 struct intel_encoder *encoder)
2880 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2881 struct intel_digital_port *intel_dig_port;
2882 struct intel_shared_dpll *pll;
2883 struct intel_dpll_hw_state pll_state = {};
2884 enum port port = encoder->port;
2885 enum intel_dpll_id min, max;
2886 int clock = crtc_state->port_clock;
2889 if (intel_port_is_combophy(dev_priv, port)) {
2890 min = DPLL_ID_ICL_DPLL0;
2891 max = DPLL_ID_ICL_DPLL1;
2892 ret = icl_calc_dpll_state(crtc_state, encoder, clock,
2894 } else if (intel_port_is_tc(dev_priv, port)) {
/* MST encoders don't carry the digital port directly; go via primary. */
2895 if (encoder->type == INTEL_OUTPUT_DP_MST) {
2896 struct intel_dp_mst_encoder *mst_encoder;
2898 mst_encoder = enc_to_mst(&encoder->base);
2899 intel_dig_port = mst_encoder->primary;
2901 intel_dig_port = enc_to_dig_port(&encoder->base);
2904 if (intel_dig_port->tc_type == TC_PORT_TBT) {
2905 min = DPLL_ID_ICL_TBTPLL;
2907 ret = icl_calc_dpll_state(crtc_state, encoder, clock,
2910 enum tc_port tc_port;
2912 tc_port = intel_port_to_tc(dev_priv, port);
/* Each TC port has a dedicated MG PLL: min == max here. */
2913 min = icl_tc_port_to_pll_id(tc_port);
2915 ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
2924 DRM_DEBUG_KMS("Could not calculate PLL state.\n");
2928 crtc_state->dpll_hw_state = pll_state;
2930 pll = intel_find_shared_dpll(crtc_state, min, max);
2932 DRM_DEBUG_KMS("No PLL selected\n");
/* Register the CRTC's use of the PLL in the atomic state. */
2936 intel_reference_shared_dpll(pll, crtc_state);
/*
 * Read back the MG PLL state for a Type-C port.
 *
 * Returns false if the PLL power well cannot be acquired or the PLL is not
 * enabled.  Readout masks off reserved bits so the result is directly
 * comparable with the state computed by icl_calc_mg_pll_state().
 */
2941 static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
2942 struct intel_shared_dpll *pll,
2943 struct intel_dpll_hw_state *hw_state)
2945 const enum intel_dpll_id id = pll->info->id;
2946 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
2947 intel_wakeref_t wakeref;
/* Bail out (without forcing a power-up) if PLL power is not enabled. */
2951 wakeref = intel_display_power_get_if_enabled(dev_priv,
2956 val = I915_READ(MG_PLL_ENABLE(tc_port));
2957 if (!(val & PLL_ENABLE))
/* Only the fields we program are kept; reserved bits are masked off. */
2960 hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
2961 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
2963 hw_state->mg_clktop2_coreclkctl1 =
2964 I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
2965 hw_state->mg_clktop2_coreclkctl1 &=
2966 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
2968 hw_state->mg_clktop2_hsclkctl =
2969 I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
2970 hw_state->mg_clktop2_hsclkctl &=
2971 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
2972 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
2973 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
2974 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
2976 hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
2977 hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
2978 hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
2979 hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
2980 hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
2982 hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
2983 hw_state->mg_pll_tdc_coldst_bias =
2984 I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
/*
 * Same refclk-dependent masking as in icl_calc_mg_pll_state(), so that
 * calculated and read-out states compare equal.
 */
2986 if (dev_priv->cdclk.hw.ref == 38400) {
2987 hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
2988 hw_state->mg_pll_bias_mask = 0;
2990 hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
2991 hw_state->mg_pll_bias_mask = -1U;
2994 hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
2995 hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
2999 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
/*
 * Common readout for ICL combo and TBT PLLs.
 *
 * @enable_reg selects which PLL instance's enable register to check.
 * Returns false if the PLL power domain cannot be acquired or the PLL is
 * disabled; otherwise fills cfgcr0/cfgcr1 in @hw_state.
 */
3003 static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3004 struct intel_shared_dpll *pll,
3005 struct intel_dpll_hw_state *hw_state,
3006 i915_reg_t enable_reg)
3008 const enum intel_dpll_id id = pll->info->id;
3009 intel_wakeref_t wakeref;
/* Only proceed if the power domain is already enabled. */
3013 wakeref = intel_display_power_get_if_enabled(dev_priv,
3018 val = I915_READ(enable_reg);
3019 if (!(val & PLL_ENABLE))
3022 hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
3023 hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
3027 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
/* Readout for combo-PHY DPLLs: CNL-style per-PLL enable register. */
3031 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3032 struct intel_shared_dpll *pll,
3033 struct intel_dpll_hw_state *hw_state)
3035 return icl_pll_get_hw_state(dev_priv, pll, hw_state,
3036 CNL_DPLL_ENABLE(pll->info->id));
/* Readout for the single Thunderbolt PLL via its dedicated enable register. */
3039 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3040 struct intel_shared_dpll *pll,
3041 struct intel_dpll_hw_state *hw_state)
3043 return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
/*
 * Program the CFGCR0/CFGCR1 registers of a combo/TBT PLL from the cached
 * state; the trailing POSTING_READ flushes the writes before enabling.
 */
3046 static void icl_dpll_write(struct drm_i915_private *dev_priv,
3047 struct intel_shared_dpll *pll)
3049 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3050 const enum intel_dpll_id id = pll->info->id;
3052 I915_WRITE(ICL_DPLL_CFGCR0(id), hw_state->cfgcr0);
3053 I915_WRITE(ICL_DPLL_CFGCR1(id), hw_state->cfgcr1);
3054 POSTING_READ(ICL_DPLL_CFGCR1(id));
/*
 * Program all MG PLL registers for a Type-C port from the cached state.
 * Registers with reserved fields are updated read-modify-write under the
 * masks established at calc/readout time.
 */
3057 static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
3058 struct intel_shared_dpll *pll)
3060 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3061 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3065 * Some of the following registers have reserved fields, so program
3066 * these with RMW based on a mask. The mask can be fixed or generated
3067 * during the calc/readout phase if the mask depends on some other HW
3068 * state like refclk, see icl_calc_mg_pll_state().
3070 val = I915_READ(MG_REFCLKIN_CTL(tc_port));
3071 val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3072 val |= hw_state->mg_refclkin_ctl;
3073 I915_WRITE(MG_REFCLKIN_CTL(tc_port), val);
3075 val = I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
3076 val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3077 val |= hw_state->mg_clktop2_coreclkctl1;
3078 I915_WRITE(MG_CLKTOP2_CORECLKCTL1(tc_port), val);
3080 val = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
3081 val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3082 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3083 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3084 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3085 val |= hw_state->mg_clktop2_hsclkctl;
3086 I915_WRITE(MG_CLKTOP2_HSCLKCTL(tc_port), val);
/* These registers have no reserved fields: plain writes suffice. */
3088 I915_WRITE(MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3089 I915_WRITE(MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3090 I915_WRITE(MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3091 I915_WRITE(MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock);
3092 I915_WRITE(MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
/* BIAS/TDC_COLDST_BIAS masks are refclk-dependent (see calc/readout). */
3094 val = I915_READ(MG_PLL_BIAS(tc_port));
3095 val &= ~hw_state->mg_pll_bias_mask;
3096 val |= hw_state->mg_pll_bias;
3097 I915_WRITE(MG_PLL_BIAS(tc_port), val);
3099 val = I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
3100 val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
3101 val |= hw_state->mg_pll_tdc_coldst_bias;
3102 I915_WRITE(MG_PLL_TDC_COLDST_BIAS(tc_port), val);
/* Flush the writes before the caller enables the PLL. */
3104 POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
/*
 * Enable PLL power via @enable_reg and wait for PLL_POWER_STATE to assert.
 * Logs an error on timeout instead of failing, matching the other
 * enable-sequence helpers in this file.
 */
3107 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3108 struct intel_shared_dpll *pll,
3109 i915_reg_t enable_reg)
3113 val = I915_READ(enable_reg);
3114 val |= PLL_POWER_ENABLE;
3115 I915_WRITE(enable_reg, val);
3118 * The spec says we need to "wait" but it also says it should be
/* 1 ms timeout; the state bit is expected to assert almost immediately. */
3121 if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE,
3122 PLL_POWER_STATE, 1))
3123 DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
3126 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3127 struct intel_shared_dpll *pll,
3128 i915_reg_t enable_reg)
3132 val = I915_READ(enable_reg);
3134 I915_WRITE(enable_reg, val);
3136 /* Timeout is actually 600us. */
3137 if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, PLL_LOCK,
3139 DRM_ERROR("PLL %d not locked\n", pll->info->id);
/*
 * Full enable sequence for a combo-PHY DPLL: power up, program dividers,
 * then enable and wait for lock.  DVFS voltage handling is done by the
 * cdclk code, hence the placeholder comments.
 */
3142 static void combo_pll_enable(struct drm_i915_private *dev_priv,
3143 struct intel_shared_dpll *pll)
3145 i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3147 icl_pll_power_enable(dev_priv, pll, enable_reg);
3149 icl_dpll_write(dev_priv, pll);
3152 * DVFS pre sequence would be here, but in our driver the cdclk code
3153 * paths should already be setting the appropriate voltage, hence we do
3157 icl_pll_enable(dev_priv, pll, enable_reg);
3159 /* DVFS post sequence would be here. See the comment above. */
/* Same sequence as combo_pll_enable(), using the dedicated TBT enable reg. */
3162 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
3163 struct intel_shared_dpll *pll)
3165 icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
3167 icl_dpll_write(dev_priv, pll);
3170 * DVFS pre sequence would be here, but in our driver the cdclk code
3171 * paths should already be setting the appropriate voltage, hence we do
3175 icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
3177 /* DVFS post sequence would be here. See the comment above. */
/*
 * Enable sequence for a Type-C MG PLL: power up, program the MG register
 * set (RMW where fields are reserved), then enable and wait for lock.
 */
3180 static void mg_pll_enable(struct drm_i915_private *dev_priv,
3181 struct intel_shared_dpll *pll)
3183 i915_reg_t enable_reg =
3184 MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3186 icl_pll_power_enable(dev_priv, pll, enable_reg);
3188 icl_mg_pll_write(dev_priv, pll);
3191 * DVFS pre sequence would be here, but in our driver the cdclk code
3192 * paths should already be setting the appropriate voltage, hence we do
3196 icl_pll_enable(dev_priv, pll, enable_reg);
3198 /* DVFS post sequence would be here. See the comment above. */
3201 static void icl_pll_disable(struct drm_i915_private *dev_priv,
3202 struct intel_shared_dpll *pll,
3203 i915_reg_t enable_reg)
3207 /* The first steps are done by intel_ddi_post_disable(). */
3210 * DVFS pre sequence would be here, but in our driver the cdclk code
3211 * paths should already be setting the appropriate voltage, hence we do
3215 val = I915_READ(enable_reg);
3217 I915_WRITE(enable_reg, val);
3219 /* Timeout is actually 1us. */
3220 if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, 0, 1))
3221 DRM_ERROR("PLL %d locked\n", pll->info->id);
3223 /* DVFS post sequence would be here. See the comment above. */
3225 val = I915_READ(enable_reg);
3226 val &= ~PLL_POWER_ENABLE;
3227 I915_WRITE(enable_reg, val);
3230 * The spec says we need to "wait" but it also says it should be
3233 if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE, 0,
3235 DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
/* Disable a combo-PHY DPLL through its CNL-style enable register. */
3238 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3239 struct intel_shared_dpll *pll)
3241 icl_pll_disable(dev_priv, pll, CNL_DPLL_ENABLE(pll->info->id));
/* Disable the Thunderbolt PLL through its dedicated enable register. */
3244 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
3245 struct intel_shared_dpll *pll)
3247 icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
/* Disable a Type-C MG PLL through the per-TC-port enable register. */
3250 static void mg_pll_disable(struct drm_i915_private *dev_priv,
3251 struct intel_shared_dpll *pll)
3253 i915_reg_t enable_reg =
3254 MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3256 icl_pll_disable(dev_priv, pll, enable_reg);
3259 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3260 struct intel_dpll_hw_state *hw_state)
3262 DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
3263 "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3264 "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3265 "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3266 "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3267 "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3268 hw_state->cfgcr0, hw_state->cfgcr1,
3269 hw_state->mg_refclkin_ctl,
3270 hw_state->mg_clktop2_coreclkctl1,
3271 hw_state->mg_clktop2_hsclkctl,
3272 hw_state->mg_pll_div0,
3273 hw_state->mg_pll_div1,
3274 hw_state->mg_pll_lf,
3275 hw_state->mg_pll_frac_lock,
3276 hw_state->mg_pll_ssc,
3277 hw_state->mg_pll_bias,
3278 hw_state->mg_pll_tdc_coldst_bias);
/* Per-PLL-type operation tables consumed through dpll_info entries below. */
3281 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
3282 .enable = combo_pll_enable,
3283 .disable = combo_pll_disable,
3284 .get_hw_state = combo_pll_get_hw_state,
/* Thunderbolt PLL operations. */
3287 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
3288 .enable = tbt_pll_enable,
3289 .disable = tbt_pll_disable,
3290 .get_hw_state = tbt_pll_get_hw_state,
/* Type-C MG PLL operations. */
3293 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
3294 .enable = mg_pll_enable,
3295 .disable = mg_pll_disable,
3296 .get_hw_state = mg_pll_get_hw_state,
/*
 * ICL PLL table; intel_shared_dpll_init() requires each entry's array index
 * to equal its DPLL id (enforced by a WARN_ON there).
 */
3299 static const struct dpll_info icl_plls[] = {
3300 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
3301 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
3302 { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
3303 { "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
3304 { "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
3305 { "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
3306 { "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
/* Platform DPLL manager selected for gen11+ in intel_shared_dpll_init(). */
3310 static const struct intel_dpll_mgr icl_pll_mgr = {
3311 .dpll_info = icl_plls,
3312 .get_dpll = icl_get_dpll,
3313 .dump_hw_state = icl_dump_hw_state,
3317 * intel_shared_dpll_init - Initialize shared DPLLs
3320 * Initialize shared DPLLs for @dev.
3322 void intel_shared_dpll_init(struct drm_device *dev)
3324 struct drm_i915_private *dev_priv = to_i915(dev);
3325 const struct intel_dpll_mgr *dpll_mgr = NULL;
3326 const struct dpll_info *dpll_info;
/* Pick the platform's PLL manager; newest platforms are checked first. */
3329 if (INTEL_GEN(dev_priv) >= 11)
3330 dpll_mgr = &icl_pll_mgr;
3331 else if (IS_CANNONLAKE(dev_priv))
3332 dpll_mgr = &cnl_pll_mgr;
3333 else if (IS_GEN9_BC(dev_priv))
3334 dpll_mgr = &skl_pll_mgr;
3335 else if (IS_GEN9_LP(dev_priv))
3336 dpll_mgr = &bxt_pll_mgr;
3337 else if (HAS_DDI(dev_priv))
3338 dpll_mgr = &hsw_pll_mgr;
3339 else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
3340 dpll_mgr = &pch_pll_mgr;
/* Platforms with no shared-DPLL manager expose zero shared DPLLs. */
3343 dev_priv->num_shared_dpll = 0;
3347 dpll_info = dpll_mgr->dpll_info;
/* Table is terminated by a NULL name; index must equal the DPLL id. */
3349 for (i = 0; dpll_info[i].name; i++) {
3350 WARN_ON(i != dpll_info[i].id);
3351 dev_priv->shared_dplls[i].info = &dpll_info[i];
3354 dev_priv->dpll_mgr = dpll_mgr;
3355 dev_priv->num_shared_dpll = i;
3356 mutex_init(&dev_priv->dpll_lock);
3358 BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
3360 /* FIXME: Move this to a more suitable place */
3361 if (HAS_DDI(dev_priv))
3362 intel_ddi_pll_init(dev);
3366 * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination
3367 * @crtc_state: atomic state for the crtc
3370 * Find an appropriate DPLL for the given CRTC and encoder combination. A
3371 * reference from the @crtc_state to the returned pll is registered in the
3372 * atomic state. That configuration is made effective by calling
3373 * intel_shared_dpll_swap_state(). The reference should be released by calling
3374 * intel_release_shared_dpll().
3377 * A shared DPLL to be used by @crtc_state and @encoder.
3379 struct intel_shared_dpll *
3380 intel_get_shared_dpll(struct intel_crtc_state *crtc_state,
3381 struct intel_encoder *encoder)
3383 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3384 const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
/* No manager means this platform has no shared DPLLs; caller bug. */
3386 if (WARN_ON(!dpll_mgr))
/* Delegate to the platform-specific selection hook. */
3389 return dpll_mgr->get_dpll(crtc_state, encoder);
3393 * intel_release_shared_dpll - end use of DPLL by CRTC in atomic state
3394 * @dpll: dpll in use by @crtc
3396 * @state: atomic state
3398 * This function releases the reference from @crtc to @dpll from the
3399 * atomic @state. The new configuration is made effective by calling
3400 * intel_shared_dpll_swap_state().
3402 void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
3403 struct intel_crtc *crtc,
3404 struct drm_atomic_state *state)
3406 struct intel_shared_dpll_state *shared_dpll_state;
3408 shared_dpll_state = intel_atomic_get_shared_dpll_state(state);
/* Drop this CRTC's bit from the PLL's staged user mask. */
3409 shared_dpll_state[dpll->info->id].crtc_mask &= ~(1 << crtc->pipe);
3413 * intel_dpll_dump_hw_state - write hw_state to dmesg
3414 * @dev_priv: i915 drm device
3415 * @hw_state: hw state to be written to the log
3417 * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
3419 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
3420 struct intel_dpll_hw_state *hw_state)
3422 if (dev_priv->dpll_mgr) {
3423 dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
3425 /* fallback for platforms that don't use the shared dpll
3428 DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
3429 "fp0: 0x%x, fp1: 0x%x\n",