2 * Copyright © 2006-2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 #include "intel_drv.h"
29 * Display PLLs used for driving outputs vary by platform. While some have
30 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
31 * from a pool. In the latter scenario, it is possible that multiple pipes
32 * share a PLL if their configurations match.
34 * This file provides an abstraction over display PLLs. The function
35 * intel_shared_dpll_init() initializes the PLLs for the given platform. The
36 * users of a PLL are tracked and that tracking is integrated with the atomic
 * modeset interface. During an atomic operation, a PLL can be requested for a
38 * given CRTC and encoder configuration by calling intel_get_shared_dpll() and
39 * a previously used PLL can be released with intel_release_shared_dpll().
40 * Changes to the users are first staged in the atomic state, and then made
41 * effective by calling intel_shared_dpll_swap_state() during the atomic
/* Snapshot the committed software state of every shared DPLL into the
 * per-atomic-state @shared_dpll array, so staged changes start from the
 * current configuration. */
intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll_state *shared_dpll)
	/* Copy shared dpll state, one entry per PLL */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		shared_dpll[i] = pll->state;
/* Return the staged DPLL state array of @s, lazily populating it from the
 * committed state on first use. Callers must hold connection_mutex. */
static struct intel_shared_dpll_state *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
	struct intel_atomic_state *state = to_intel_atomic_state(s);

	/* staged DPLL state is protected by the global modeset lock */
	WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));

	/* first access: seed the staged copy from the committed state */
	if (!state->dpll_set) {
		state->dpll_set = true;

		intel_atomic_duplicate_dpll_state(to_i915(s->dev),

	return state->shared_dpll;
 * intel_get_shared_dpll_by_id - get a DPLL given its id
 * @dev_priv: i915 device instance
 *
 * Returns:
 * A pointer to the DPLL with @id; no range checking is performed, the
 * caller must pass a valid id.
 */
struct intel_shared_dpll *
intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
			    enum intel_dpll_id id)
	return &dev_priv->shared_dplls[id];
92 * intel_get_shared_dpll_id - get the id of a DPLL
93 * @dev_priv: i915 device instance
100 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
101 struct intel_shared_dpll *pll)
103 if (WARN_ON(pll < dev_priv->shared_dplls||
104 pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
107 return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
/* Warn if @pll's hardware enable state does not match the expected
 * @state; reads the state via the PLL's get_hw_state hook. */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
	struct intel_dpll_hw_state hw_state;

	/* can't assert anything without a PLL to query */
	if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))

	cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
			pll->info->name, onoff(state), onoff(cur_state));
 * intel_prepare_shared_dpll - call a dpll's prepare hook
 * @crtc_state: CRTC, and its state, which has a shared dpll
 *
 * This calls the PLL's prepare hook if it has one and if the PLL is not
 * already enabled. The prepare hook is platform specific.
 */
void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;

	if (WARN_ON(pll == NULL))

	/* dpll_lock serializes active_mask checks against enable/disable */
	mutex_lock(&dev_priv->dpll_lock);
	WARN_ON(!pll->state.crtc_mask);
	if (!pll->active_mask) {
		DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name);
		/* must not prepare a PLL that is already running */
		assert_shared_dpll_disabled(dev_priv, pll);
		pll->info->funcs->prepare(dev_priv, pll);
	mutex_unlock(&dev_priv->dpll_lock);
 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Enable the shared DPLL used by @crtc; the hardware is only touched when
 * this CRTC is the first active user.
 */
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
	unsigned int old_mask;

	if (WARN_ON(pll == NULL))

	mutex_lock(&dev_priv->dpll_lock);
	old_mask = pll->active_mask;

	/* the CRTC must be a staged user and not yet marked active */
	if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
	    WARN_ON(pll->active_mask & crtc_mask))

	pll->active_mask |= crtc_mask;

	DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
		      pll->info->name, pll->active_mask, pll->on,

	/* other CRTCs already use it: just assert it is really on */
	assert_shared_dpll_enabled(dev_priv, pll);

	DRM_DEBUG_KMS("enabling %s\n", pll->info->name);
	pll->info->funcs->enable(dev_priv, pll);

	mutex_unlock(&dev_priv->dpll_lock);
 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Disable the shared DPLL used by @crtc; the hardware is only touched once
 * the last active user drops off.
 */
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);

	/* PCH only available on ILK+, nothing to do on older gens */
	if (INTEL_GEN(dev_priv) < 5)

	mutex_lock(&dev_priv->dpll_lock);
	if (WARN_ON(!(pll->active_mask & crtc_mask)))

	DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
		      pll->info->name, pll->active_mask, pll->on,

	assert_shared_dpll_enabled(dev_priv, pll);

	pll->active_mask &= ~crtc_mask;
	/* keep the PLL running while any other CRTC still uses it */
	if (pll->active_mask)

	DRM_DEBUG_KMS("disabling %s\n", pll->info->name);
	pll->info->funcs->disable(dev_priv, pll);

	mutex_unlock(&dev_priv->dpll_lock);
/* Find a PLL in [range_min, range_max] for @crtc_state: prefer an in-use
 * PLL whose staged hw state matches exactly (sharing), otherwise fall back
 * to an unused one. */
static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
		       enum intel_dpll_id range_min,
		       enum intel_dpll_id range_max)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll, *unused_pll = NULL;
	struct intel_shared_dpll_state *shared_dpll;
	enum intel_dpll_id i;

	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);

	for (i = range_min; i <= range_max; i++) {
		pll = &dev_priv->shared_dplls[i];

		/* Only want to check enabled timings first */
		if (shared_dpll[i].crtc_mask == 0) {

		/* whole hw state must match bit-for-bit to share a PLL */
		if (memcmp(&crtc_state->dpll_hw_state,
			   &shared_dpll[i].hw_state,
			   sizeof(crtc_state->dpll_hw_state)) == 0) {
			DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
				      crtc->base.base.id, crtc->base.name,
				      shared_dpll[i].crtc_mask,

	/* Ok no matching timings, maybe there's a free one? */
		DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
			      crtc->base.base.id, crtc->base.name,
			      unused_pll->info->name);
/* Stage @crtc_state's pipe as a user of @pll in the atomic state and store
 * the chosen PLL in the CRTC state. The staged hw state is taken from the
 * first referencing CRTC. */
intel_reference_shared_dpll(struct intel_shared_dpll *pll,
			    struct intel_crtc_state *crtc_state)
	struct intel_shared_dpll_state *shared_dpll;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	const enum intel_dpll_id id = pll->info->id;

	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);

	/* first user defines the staged hw state for this PLL */
	if (shared_dpll[id].crtc_mask == 0)
		shared_dpll[id].hw_state =
			crtc_state->dpll_hw_state;

	crtc_state->shared_dpll = pll;
	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
			 pipe_name(crtc->pipe));

	shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
 * @state: atomic state
 *
 * This is the dpll version of drm_atomic_helper_swap_state() since the
 * helper does not handle driver-specific global state.
 *
 * For consistency with atomic helpers this function does a complete swap,
 * i.e. it also puts the current state into @state, even though there is no
 * need for that at this moment.
 */
void intel_shared_dpll_swap_state(struct drm_atomic_state *state)
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_shared_dpll_state *shared_dpll;
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	/* nothing staged, nothing to swap */
	if (!to_intel_atomic_state(state)->dpll_set)

	shared_dpll = to_intel_atomic_state(state)->shared_dpll;
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll_state tmp;

		pll = &dev_priv->shared_dplls[i];

		/* NOTE(review): tmp looks uninitialized here — presumably
		 * 'tmp = pll->state;' precedes this; verify in full source */
		pll->state = shared_dpll[i];
		shared_dpll[i] = tmp;
/* Read back the PCH DPLL/FP registers into @hw_state; returns true if the
 * PLL's VCO is enabled. Takes a display power reference for the access. */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get_if_enabled(dev_priv,

	val = I915_READ(PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = I915_READ(PCH_FP0(id));
	hw_state->fp1 = I915_READ(PCH_FP1(id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);

	return val & DPLL_VCO_ENABLE;
/* Program the FP0/FP1 divisor registers before the PLL is enabled. */
static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
	const enum intel_dpll_id id = pll->info->id;

	I915_WRITE(PCH_FP0(id), pll->state.hw_state.fp0);
	I915_WRITE(PCH_FP1(id), pll->state.hw_state.fp1);
/* Warn unless some PCH reference clock source (SSC/non-spread/superspread)
 * is enabled in PCH_DREF_CONTROL; only meaningful on IBX/CPT PCHs. */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));

	val = I915_READ(PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
/* Enable the PCH DPLL: program DPLL, wait for clock stabilization, then
 * rewrite DPLL so the pixel multiplier takes effect. */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(PCH_DPLL(id));

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.

	I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
	POSTING_READ(PCH_DPLL(id));
/* Disable the PCH DPLL by clearing its control register. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
	const enum intel_dpll_id id = pll->info->id;

	I915_WRITE(PCH_DPLL(id), 0);
	POSTING_READ(PCH_DPLL(id));
/* Select a PCH DPLL for @crtc_state: IBX uses a fixed pipe->PLL mapping,
 * later PCHs search the shared pool; the result is then referenced in the
 * atomic state. */
static struct intel_shared_dpll *
ibx_get_dpll(struct intel_crtc_state *crtc_state,
	     struct intel_encoder *encoder)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	if (HAS_PCH_IBX(dev_priv)) {
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
		i = (enum intel_dpll_id) crtc->pipe;
		pll = &dev_priv->shared_dplls[i];

		DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
			      crtc->base.base.id, crtc->base.name,

		pll = intel_find_shared_dpll(crtc_state,

	/* reference the pll */
	intel_reference_shared_dpll(pll, crtc_state);
/* Dump the IBX dpll hw state fields to the debug log. */
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
			      struct intel_dpll_hw_state *hw_state)
	DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
		      "fp0: 0x%x, fp1: 0x%x\n",
/* Hook table for the IBX/CPT PCH DPLLs. */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.prepare = ibx_pch_dpll_prepare,
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
/* HSW: enable a WRPLL by writing its staged control value. */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
	const enum intel_dpll_id id = pll->info->id;

	I915_WRITE(WRPLL_CTL(id), pll->state.hw_state.wrpll);
	POSTING_READ(WRPLL_CTL(id));
/* HSW: enable the SPLL by writing its staged control value. */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
	I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
	POSTING_READ(SPLL_CTL);
/* HSW: disable a WRPLL by clearing its enable bit (read-modify-write). */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
	const enum intel_dpll_id id = pll->info->id;

	val = I915_READ(WRPLL_CTL(id));
	I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
	POSTING_READ(WRPLL_CTL(id));
/* HSW: disable the SPLL by clearing its enable bit (read-modify-write). */
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
	val = I915_READ(SPLL_CTL);
	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
	POSTING_READ(SPLL_CTL);
/* HSW: read the WRPLL control register into @hw_state; returns true if
 * the PLL is enabled. Needs a display power reference. */
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get_if_enabled(dev_priv,

	val = I915_READ(WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);

	return val & WRPLL_PLL_ENABLE;
/* HSW: read the SPLL control register into @hw_state; returns true if
 * the PLL is enabled. Needs a display power reference. */
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get_if_enabled(dev_priv,

	val = I915_READ(SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);

	return val & SPLL_PLL_ENABLE;
553 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
559 /* Constraints for PLL good behavior */
565 struct hsw_wrpll_rnp {
569 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
/* Evaluate one (r2, n2, p) WRPLL divider candidate against the current
 * *best, using integer cross-multiplied comparisons to avoid division;
 * updates *best if the candidate is preferable under the PPM budget. */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 struct hsw_wrpll_rnp *best)
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 *
	 * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution. However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it? Update. */
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
	/* Otherwise a < c && b >= d, do nothing */
/* Exhaustively search WRPLL dividers (r2, n2, p) for @clock, keeping the
 * best candidate via hsw_wrpll_update_rnp(); outputs via *r2_out/*n2_out/
 * *p_out. */
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
	struct hsw_wrpll_rnp best = { 0, 0, 0 };

	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;

			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
/* HSW HDMI: compute WRPLL dividers for the port clock, stage the wrpll hw
 * state, and pick one of the two WRPLLs from the shared pool. */
static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *crtc_state)
	struct intel_shared_dpll *pll;
	unsigned int p, n2, r2;

	/* port_clock is in kHz, calculation wants Hz */
	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);

	val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
	      WRPLL_DIVIDER_POST(p);

	crtc_state->dpll_hw_state.wrpll = val;

	pll = intel_find_shared_dpll(crtc_state,
				     DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
/* HSW DP: map the link clock to one of the fixed LCPLL frequencies and
 * return that PLL; rejects unsupported clocks. */
static struct intel_shared_dpll *
hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id pll_id;
	int clock = crtc_state->port_clock;

		pll_id = DPLL_ID_LCPLL_810;

		pll_id = DPLL_ID_LCPLL_1350;

		pll_id = DPLL_ID_LCPLL_2700;

		/* unsupported link rate */
		DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);

	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
/* HSW: dispatch PLL selection by output type (HDMI -> WRPLL, DP -> LCPLL,
 * analog -> SPLL) and reference the chosen PLL in the atomic state. */
static struct intel_shared_dpll *
hsw_get_dpll(struct intel_crtc_state *crtc_state,
	     struct intel_encoder *encoder)
	struct intel_shared_dpll *pll;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		pll = hsw_ddi_hdmi_get_dpll(crtc_state);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		pll = hsw_ddi_dp_get_dpll(crtc_state);
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
		/* CRT only supported at the fixed 135 MHz SPLL rate */
		if (WARN_ON(crtc_state->port_clock / 2 != 135000))

		crtc_state->dpll_hw_state.spll =
			SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;

		pll = intel_find_shared_dpll(crtc_state,
					     DPLL_ID_SPLL, DPLL_ID_SPLL);

	intel_reference_shared_dpll(pll, crtc_state);
/* Dump the HSW dpll hw state fields to the debug log. */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      struct intel_dpll_hw_state *hw_state)
	DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		      hw_state->wrpll, hw_state->spll);
/* Hook table for the HSW WRPLLs (no prepare step needed). */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
/* Hook table for the HSW SPLL. */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
/* LCPLL is managed by the CDCLK code; nothing to do here. */
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
/* LCPLL is managed by the CDCLK code; nothing to do here. */
static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
/* LCPLL state readout stub for the fixed-frequency LCPLL "PLLs". */
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
/* Hook table for the fixed HSW LCPLL link rates. */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
/* Per-DPLL register addresses on SKL (control + two config registers). */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {

	/* DPLL 0 doesn't support HDMI mode */

		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),

		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),

		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
/* Update this PLL's 6-bit field in the shared DPLL_CTRL1 register with the
 * staged ctrl1 value. */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
	const enum intel_dpll_id id = pll->info->id;

	val = I915_READ(DPLL_CTRL1);

	/* each PLL owns a 6-bit field at bit position id * 6 */
	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
		 DPLL_CTRL1_LINK_RATE_MASK(id));
	val |= pll->state.hw_state.ctrl1 << (id * 6);

	I915_WRITE(DPLL_CTRL1, val);
	POSTING_READ(DPLL_CTRL1);
/* SKL: program ctrl1 and the cfgcr registers, set the enable bit, and
 * wait for the PLL to report lock. */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	I915_WRITE(regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	I915_WRITE(regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	POSTING_READ(regs[id].cfgcr1);
	POSTING_READ(regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	I915_WRITE(regs[id].ctl,
		   I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);

	if (intel_wait_for_register(dev_priv,

		DRM_ERROR("DPLL %d not locked\n", id);
/* DPLL0 drives CDCLK and is enabled elsewhere; only ctrl1 is updated. */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
/* SKL: disable a DPLL by clearing its enable bit. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	I915_WRITE(regs[id].ctl,
		   I915_READ(regs[id].ctl) & ~LCPLL_PLL_ENABLE);
	POSTING_READ(regs[id].ctl);
/* DPLL0 must stay on (it drives CDCLK); never touch it here. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
/* SKL: read back ctrl1 and (for HDMI mode) the cfgcr registers into
 * @hw_state; returns true if the PLL is enabled. */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get_if_enabled(dev_priv,

	val = I915_READ(regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))

	val = I915_READ(DPLL_CTRL1);
	/* extract this PLL's 6-bit field from the shared register */
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = I915_READ(regs[id].cfgcr1);
		hw_state->cfgcr2 = I915_READ(regs[id].cfgcr2);

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
/* SKL DPLL0 readout: expects the PLL to be on (it drives CDCLK) and only
 * reads back the ctrl1 field. */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get_if_enabled(dev_priv,

	/* DPLL0 is always enabled since it drives CDCLK */
	val = I915_READ(regs[id].ctl);
	if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))

	val = I915_READ(DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
/* Running best-candidate state for the SKL WRPLL divider search. */
struct skl_wrpll_context {
	u64 min_deviation; /* current minimal deviation */
	u64 central_freq; /* chosen central freq */
	u64 dco_freq; /* chosen dco freq */
	unsigned int p; /* chosen divider */
/* Reset the search context; min_deviation starts at U64_MAX so the first
 * acceptable candidate always wins. */
static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
	memset(ctx, 0, sizeof(*ctx));

	ctx->min_deviation = U64_MAX;
1078 /* DCO freq must be within +1%/-6% of the DCO central freq */
1079 #define SKL_DCO_MAX_PDEVIATION 100
1080 #define SKL_DCO_MAX_NDEVIATION 600
/* Test one divider candidate: compute the DCO deviation from the central
 * frequency (in 0.01% units) and record it as the new best if within the
 * asymmetric +1%/-6% limits and smaller than the current minimum. */
static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
				  unsigned int divider)
	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),

	/* positive deviation */
	if (dco_freq >= central_freq) {
		if (deviation < SKL_DCO_MAX_PDEVIATION &&
		    deviation < ctx->min_deviation) {
			ctx->min_deviation = deviation;
			ctx->central_freq = central_freq;
			ctx->dco_freq = dco_freq;

	/* negative deviation */
	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
		   deviation < ctx->min_deviation) {
		ctx->min_deviation = deviation;
		ctx->central_freq = central_freq;
		ctx->dco_freq = dco_freq;
/* Decompose total divider @p into hardware multipliers P0 (pdiv),
 * P1 (qdiv) and P2 (kdiv); even dividers are factored via p/2, odd ones
 * are handled as explicit special cases. */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
	/* even dividers: factor half = p / 2 */
	unsigned int half = p / 2;

	if (half == 1 || half == 2 || half == 3 || half == 5) {

	} else if (half % 2 == 0) {

	} else if (half % 3 == 0) {

	} else if (half % 7 == 0) {

	} else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */

	} else if (p == 5 || p == 7) {

	} else if (p == 15) {

	} else if (p == 21) {

	} else if (p == 35) {
1160 struct skl_wrpll_params {
/* Encode the chosen central frequency and P0/P1/P2 multipliers into the
 * register-level skl_wrpll_params, including the fixed-point DCO value. */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 p0, u32 p1, u32 p2)
	/* encode central freq selection for DPLL_CFGCR2 */
	switch (central_freq) {
		params->central_freq = 0;
		params->central_freq = 1;
		params->central_freq = 3;

		WARN(1, "Incorrect PDiv\n");

		WARN(1, "Incorrect KDiv\n");

	params->qdiv_ratio = p1;
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bspec
	 */
	params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
	params->dco_fraction =
		div_u64((div_u64(dco_freq, 24) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
/* SKL WRPLL search: try even dividers first (preferred), then odd, against
 * the three DCO central frequencies; populate *wrpll_params from the best
 * candidate found. */
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			struct skl_wrpll_params *wrpll_params)
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
	u64 dco_central_freq[3] = { 8400000000ULL,

	static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
					     24, 28, 30, 32, 36, 40, 42, 44,
					     48, 52, 54, 56, 60, 64, 66, 68,
					     70, 72, 76, 78, 80, 84, 88, 90,
	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {

		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	struct skl_wrpll_context ctx;
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;

	skl_wrpll_context_init(&ctx);

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 */
		if (d == 0 && ctx.p)

	DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
/* SKL HDMI: compute WRPLL parameters for @clock and stage ctrl1/cfgcr1/
 * cfgcr2 into crtc_state->dpll_hw_state; returns false if no dividers
 * were found. */
static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
				      struct intel_crtc_state *crtc_state,
	u32 ctrl1, cfgcr1, cfgcr2;
	struct skl_wrpll_params wrpll_params = { 0, };

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	/* clock is in kHz, calculation wants Hz */
	if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
/* SKL DP: translate the link clock into a DPLL_CTRL1 link-rate encoding
 * and store it in @dpll_hw_state. */
skl_ddi_dp_set_dpll_hw_state(int clock,
			     struct intel_dpll_hw_state *dpll_hw_state)
	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	/* clock is the symbol-rate-doubled link clock, hence / 2 */
	switch (clock / 2) {

		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);

		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);

		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);

		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);

		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);

		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);

	dpll_hw_state->ctrl1 = ctrl1;
/* SKL: compute the dpll hw state by output type, then search the pool
 * (eDP may use DPLL0, others use DPLL1-3) and reference the result. */
static struct intel_shared_dpll *
skl_get_dpll(struct intel_crtc_state *crtc_state,
	     struct intel_encoder *encoder)
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct intel_shared_dpll *pll;
	int clock = crtc_state->port_clock;

	struct intel_dpll_hw_state dpll_hw_state;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		bret = skl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);

			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");

	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		bret = skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);

			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");

		crtc_state->dpll_hw_state = dpll_hw_state;

	/* eDP is the only user allowed to grab DPLL0 (shared with CDCLK) */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
		pll = intel_find_shared_dpll(crtc_state,

		pll = intel_find_shared_dpll(crtc_state,

	intel_reference_shared_dpll(pll, crtc_state);
/* Dump the SKL dpll hw state fields to the debug log. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      struct intel_dpll_hw_state *hw_state)
	DRM_DEBUG_KMS("dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
/* Hook table for SKL DPLL1-3. */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
/* Hook table for SKL DPLL0 (CDCLK source, never fully disabled here). */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
1451 static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1452 struct intel_shared_dpll *pll)
1455 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1457 enum dpio_channel ch;
1459 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1461 /* Non-SSC reference */
1462 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1463 temp |= PORT_PLL_REF_SEL;
1464 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1466 if (IS_GEMINILAKE(dev_priv)) {
1467 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1468 temp |= PORT_PLL_POWER_ENABLE;
1469 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1471 if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1472 PORT_PLL_POWER_STATE), 200))
1473 DRM_ERROR("Power state not set for PLL:%d\n", port);
1476 /* Disable 10 bit clock */
1477 temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1478 temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1479 I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1482 temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1483 temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
1484 temp |= pll->state.hw_state.ebb0;
1485 I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
1487 /* Write M2 integer */
1488 temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1489 temp &= ~PORT_PLL_M2_MASK;
1490 temp |= pll->state.hw_state.pll0;
1491 I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
1494 temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1495 temp &= ~PORT_PLL_N_MASK;
1496 temp |= pll->state.hw_state.pll1;
1497 I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
1499 /* Write M2 fraction */
1500 temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1501 temp &= ~PORT_PLL_M2_FRAC_MASK;
1502 temp |= pll->state.hw_state.pll2;
1503 I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
1505 /* Write M2 fraction enable */
1506 temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1507 temp &= ~PORT_PLL_M2_FRAC_ENABLE;
1508 temp |= pll->state.hw_state.pll3;
1509 I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
1512 temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1513 temp &= ~PORT_PLL_PROP_COEFF_MASK;
1514 temp &= ~PORT_PLL_INT_COEFF_MASK;
1515 temp &= ~PORT_PLL_GAIN_CTL_MASK;
1516 temp |= pll->state.hw_state.pll6;
1517 I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
1519 /* Write calibration val */
1520 temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1521 temp &= ~PORT_PLL_TARGET_CNT_MASK;
1522 temp |= pll->state.hw_state.pll8;
1523 I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
1525 temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1526 temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
1527 temp |= pll->state.hw_state.pll9;
1528 I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
1530 temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1531 temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
1532 temp &= ~PORT_PLL_DCO_AMP_MASK;
1533 temp |= pll->state.hw_state.pll10;
1534 I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
1536 /* Recalibrate with new settings */
1537 temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1538 temp |= PORT_PLL_RECALIBRATE;
1539 I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1540 temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1541 temp |= pll->state.hw_state.ebb4;
1542 I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1545 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1546 temp |= PORT_PLL_ENABLE;
1547 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1548 POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1550 if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
1552 DRM_ERROR("PLL %d not locked\n", port);
1554 if (IS_GEMINILAKE(dev_priv)) {
1555 temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
1556 temp |= DCC_DELAY_RANGE_2;
1557 I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
1561 * While we write to the group register to program all lanes at once we
1562 * can read only lane registers and we pick lanes 0/1 for that.
1564 temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1565 temp &= ~LANE_STAGGER_MASK;
1566 temp &= ~LANESTAGGER_STRAP_OVRD;
1567 temp |= pll->state.hw_state.pcsdw12;
1568 I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
1571 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
1572 struct intel_shared_dpll *pll)
1574 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1577 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1578 temp &= ~PORT_PLL_ENABLE;
1579 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1580 POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1582 if (IS_GEMINILAKE(dev_priv)) {
1583 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1584 temp &= ~PORT_PLL_POWER_ENABLE;
1585 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1587 if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1588 PORT_PLL_POWER_STATE), 200))
1589 DRM_ERROR("Power state not reset for PLL:%d\n", port);
1593 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1594 struct intel_shared_dpll *pll,
1595 struct intel_dpll_hw_state *hw_state)
1597 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
1598 intel_wakeref_t wakeref;
1600 enum dpio_channel ch;
1604 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1606 wakeref = intel_display_power_get_if_enabled(dev_priv,
1613 val = I915_READ(BXT_PORT_PLL_ENABLE(port));
1614 if (!(val & PORT_PLL_ENABLE))
1617 hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1618 hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
1620 hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1621 hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
1623 hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1624 hw_state->pll0 &= PORT_PLL_M2_MASK;
1626 hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1627 hw_state->pll1 &= PORT_PLL_N_MASK;
1629 hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1630 hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
1632 hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1633 hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
1635 hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1636 hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
1637 PORT_PLL_INT_COEFF_MASK |
1638 PORT_PLL_GAIN_CTL_MASK;
1640 hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1641 hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
1643 hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1644 hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
1646 hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1647 hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
1648 PORT_PLL_DCO_AMP_MASK;
1651 * While we write to the group register to program all lanes at once we
1652 * can read only lane registers. We configure all lanes the same way, so
1653 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
1655 hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1656 if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
1657 DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
1659 I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
1660 hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
1665 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
1670 /* bxt clock parameters */
1671 struct bxt_clk_div {
1683 /* pre-calculated values for DP linkrates */
1684 static const struct bxt_clk_div bxt_dp_clk_val[] = {
1685 {162000, 4, 2, 32, 1677722, 1, 1},
1686 {270000, 4, 1, 27, 0, 0, 1},
1687 {540000, 2, 1, 27, 0, 0, 1},
1688 {216000, 3, 2, 32, 1677722, 1, 1},
1689 {243000, 4, 1, 24, 1258291, 1, 1},
1690 {324000, 4, 1, 32, 1677722, 1, 1},
1691 {432000, 3, 1, 32, 1677722, 1, 1}
1695 bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
1696 struct intel_crtc_state *crtc_state, int clock,
1697 struct bxt_clk_div *clk_div)
1699 struct dpll best_clock;
1701 /* Calculate HDMI div */
1703 * FIXME: tie the following calculation into
1704 * i9xx_crtc_compute_clock
1706 if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
1707 DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
1708 clock, pipe_name(intel_crtc->pipe));
1712 clk_div->p1 = best_clock.p1;
1713 clk_div->p2 = best_clock.p2;
1714 WARN_ON(best_clock.m1 != 2);
1715 clk_div->n = best_clock.n;
1716 clk_div->m2_int = best_clock.m2 >> 22;
1717 clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
1718 clk_div->m2_frac_en = clk_div->m2_frac != 0;
1720 clk_div->vco = best_clock.vco;
1725 static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div)
1729 *clk_div = bxt_dp_clk_val[0];
1730 for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
1731 if (bxt_dp_clk_val[i].clock == clock) {
1732 *clk_div = bxt_dp_clk_val[i];
1737 clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
1740 static bool bxt_ddi_set_dpll_hw_state(int clock,
1741 struct bxt_clk_div *clk_div,
1742 struct intel_dpll_hw_state *dpll_hw_state)
1744 int vco = clk_div->vco;
1745 u32 prop_coef, int_coef, gain_ctl, targ_cnt;
1748 if (vco >= 6200000 && vco <= 6700000) {
1753 } else if ((vco > 5400000 && vco < 6200000) ||
1754 (vco >= 4800000 && vco < 5400000)) {
1759 } else if (vco == 5400000) {
1765 DRM_ERROR("Invalid VCO\n");
1771 else if (clock > 135000)
1773 else if (clock > 67000)
1775 else if (clock > 33000)
1780 dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
1781 dpll_hw_state->pll0 = clk_div->m2_int;
1782 dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
1783 dpll_hw_state->pll2 = clk_div->m2_frac;
1785 if (clk_div->m2_frac_en)
1786 dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
1788 dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
1789 dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
1791 dpll_hw_state->pll8 = targ_cnt;
1793 dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
1795 dpll_hw_state->pll10 =
1796 PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
1797 | PORT_PLL_DCO_AMP_OVR_EN_H;
1799 dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
1801 dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
1807 bxt_ddi_dp_set_dpll_hw_state(int clock,
1808 struct intel_dpll_hw_state *dpll_hw_state)
1810 struct bxt_clk_div clk_div = {0};
1812 bxt_ddi_dp_pll_dividers(clock, &clk_div);
1814 return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
1818 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc *intel_crtc,
1819 struct intel_crtc_state *crtc_state, int clock,
1820 struct intel_dpll_hw_state *dpll_hw_state)
1822 struct bxt_clk_div clk_div = { };
1824 bxt_ddi_hdmi_pll_dividers(intel_crtc, crtc_state, clock, &clk_div);
1826 return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
1829 static struct intel_shared_dpll *
1830 bxt_get_dpll(struct intel_crtc_state *crtc_state,
1831 struct intel_encoder *encoder)
1833 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1834 struct intel_dpll_hw_state dpll_hw_state = { };
1835 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1836 struct intel_shared_dpll *pll;
1837 int i, clock = crtc_state->port_clock;
1839 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
1840 !bxt_ddi_hdmi_set_dpll_hw_state(crtc, crtc_state, clock,
1844 if (intel_crtc_has_dp_encoder(crtc_state) &&
1845 !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
1848 memset(&crtc_state->dpll_hw_state, 0,
1849 sizeof(crtc_state->dpll_hw_state));
1851 crtc_state->dpll_hw_state = dpll_hw_state;
1853 /* 1:1 mapping between ports and PLLs */
1854 i = (enum intel_dpll_id) encoder->port;
1855 pll = intel_get_shared_dpll_by_id(dev_priv, i);
1857 DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
1858 crtc->base.base.id, crtc->base.name, pll->info->name);
1860 intel_reference_shared_dpll(pll, crtc_state);
1865 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
1866 struct intel_dpll_hw_state *hw_state)
1868 DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
1869 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
1870 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
1884 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
1885 .enable = bxt_ddi_pll_enable,
1886 .disable = bxt_ddi_pll_disable,
1887 .get_hw_state = bxt_ddi_pll_get_hw_state,
1890 static void intel_ddi_pll_init(struct drm_device *dev)
1892 struct drm_i915_private *dev_priv = to_i915(dev);
1894 if (INTEL_GEN(dev_priv) < 9) {
1895 u32 val = I915_READ(LCPLL_CTL);
1898 * The LCPLL register should be turned on by the BIOS. For now
1899 * let's just check its state and print errors in case
1900 * something is wrong. Don't even try to turn it on.
1903 if (val & LCPLL_CD_SOURCE_FCLK)
1904 DRM_ERROR("CDCLK source is not LCPLL\n");
1906 if (val & LCPLL_PLL_DISABLE)
1907 DRM_ERROR("LCPLL is disabled\n");
/* Per-platform DPLL manager: available PLLs plus selection/debug hooks. */
struct intel_dpll_mgr {
	const struct dpll_info *dpll_info;	/* NULL-name-terminated table */

	/* pick and reference a PLL for the given CRTC/encoder config */
	struct intel_shared_dpll *(*get_dpll)(struct intel_crtc_state *crtc_state,
					      struct intel_encoder *encoder);

	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      struct intel_dpll_hw_state *hw_state);
};
1921 static const struct dpll_info pch_plls[] = {
1922 { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
1923 { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
1927 static const struct intel_dpll_mgr pch_pll_mgr = {
1928 .dpll_info = pch_plls,
1929 .get_dpll = ibx_get_dpll,
1930 .dump_hw_state = ibx_dump_hw_state,
1933 static const struct dpll_info hsw_plls[] = {
1934 { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
1935 { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
1936 { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
1937 { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
1938 { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
1939 { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
1943 static const struct intel_dpll_mgr hsw_pll_mgr = {
1944 .dpll_info = hsw_plls,
1945 .get_dpll = hsw_get_dpll,
1946 .dump_hw_state = hsw_dump_hw_state,
1949 static const struct dpll_info skl_plls[] = {
1950 { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
1951 { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
1952 { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
1953 { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
1957 static const struct intel_dpll_mgr skl_pll_mgr = {
1958 .dpll_info = skl_plls,
1959 .get_dpll = skl_get_dpll,
1960 .dump_hw_state = skl_dump_hw_state,
1963 static const struct dpll_info bxt_plls[] = {
1964 { "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
1965 { "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
1966 { "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
1970 static const struct intel_dpll_mgr bxt_pll_mgr = {
1971 .dpll_info = bxt_plls,
1972 .get_dpll = bxt_get_dpll,
1973 .dump_hw_state = bxt_dump_hw_state,
1976 static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
1977 struct intel_shared_dpll *pll)
1979 const enum intel_dpll_id id = pll->info->id;
1982 /* 1. Enable DPLL power in DPLL_ENABLE. */
1983 val = I915_READ(CNL_DPLL_ENABLE(id));
1984 val |= PLL_POWER_ENABLE;
1985 I915_WRITE(CNL_DPLL_ENABLE(id), val);
1987 /* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
1988 if (intel_wait_for_register(dev_priv,
1989 CNL_DPLL_ENABLE(id),
1993 DRM_ERROR("PLL %d Power not enabled\n", id);
1996 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
1997 * select DP mode, and set DP link rate.
1999 val = pll->state.hw_state.cfgcr0;
2000 I915_WRITE(CNL_DPLL_CFGCR0(id), val);
2002 /* 4. Reab back to ensure writes completed */
2003 POSTING_READ(CNL_DPLL_CFGCR0(id));
2005 /* 3. Configure DPLL_CFGCR0 */
2006 /* Avoid touch CFGCR1 if HDMI mode is not enabled */
2007 if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
2008 val = pll->state.hw_state.cfgcr1;
2009 I915_WRITE(CNL_DPLL_CFGCR1(id), val);
2010 /* 4. Reab back to ensure writes completed */
2011 POSTING_READ(CNL_DPLL_CFGCR1(id));
2015 * 5. If the frequency will result in a change to the voltage
2016 * requirement, follow the Display Voltage Frequency Switching
2017 * Sequence Before Frequency Change
2019 * Note: DVFS is actually handled via the cdclk code paths,
2020 * hence we do nothing here.
2023 /* 6. Enable DPLL in DPLL_ENABLE. */
2024 val = I915_READ(CNL_DPLL_ENABLE(id));
2026 I915_WRITE(CNL_DPLL_ENABLE(id), val);
2028 /* 7. Wait for PLL lock status in DPLL_ENABLE. */
2029 if (intel_wait_for_register(dev_priv,
2030 CNL_DPLL_ENABLE(id),
2034 DRM_ERROR("PLL %d not locked\n", id);
2037 * 8. If the frequency will result in a change to the voltage
2038 * requirement, follow the Display Voltage Frequency Switching
2039 * Sequence After Frequency Change
2041 * Note: DVFS is actually handled via the cdclk code paths,
2042 * hence we do nothing here.
2046 * 9. turn on the clock for the DDI and map the DPLL to the DDI
2047 * Done at intel_ddi_clk_select
2051 static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
2052 struct intel_shared_dpll *pll)
2054 const enum intel_dpll_id id = pll->info->id;
2058 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
2059 * Done at intel_ddi_post_disable
2063 * 2. If the frequency will result in a change to the voltage
2064 * requirement, follow the Display Voltage Frequency Switching
2065 * Sequence Before Frequency Change
2067 * Note: DVFS is actually handled via the cdclk code paths,
2068 * hence we do nothing here.
2071 /* 3. Disable DPLL through DPLL_ENABLE. */
2072 val = I915_READ(CNL_DPLL_ENABLE(id));
2074 I915_WRITE(CNL_DPLL_ENABLE(id), val);
2076 /* 4. Wait for PLL not locked status in DPLL_ENABLE. */
2077 if (intel_wait_for_register(dev_priv,
2078 CNL_DPLL_ENABLE(id),
2082 DRM_ERROR("PLL %d locked\n", id);
2085 * 5. If the frequency will result in a change to the voltage
2086 * requirement, follow the Display Voltage Frequency Switching
2087 * Sequence After Frequency Change
2089 * Note: DVFS is actually handled via the cdclk code paths,
2090 * hence we do nothing here.
2093 /* 6. Disable DPLL power in DPLL_ENABLE. */
2094 val = I915_READ(CNL_DPLL_ENABLE(id));
2095 val &= ~PLL_POWER_ENABLE;
2096 I915_WRITE(CNL_DPLL_ENABLE(id), val);
2098 /* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
2099 if (intel_wait_for_register(dev_priv,
2100 CNL_DPLL_ENABLE(id),
2104 DRM_ERROR("PLL %d Power not disabled\n", id);
2107 static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2108 struct intel_shared_dpll *pll,
2109 struct intel_dpll_hw_state *hw_state)
2111 const enum intel_dpll_id id = pll->info->id;
2112 intel_wakeref_t wakeref;
2116 wakeref = intel_display_power_get_if_enabled(dev_priv,
2123 val = I915_READ(CNL_DPLL_ENABLE(id));
2124 if (!(val & PLL_ENABLE))
2127 val = I915_READ(CNL_DPLL_CFGCR0(id));
2128 hw_state->cfgcr0 = val;
2130 /* avoid reading back stale values if HDMI mode is not enabled */
2131 if (val & DPLL_CFGCR0_HDMI_MODE) {
2132 hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(id));
2137 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
/*
 * Decompose a CNL WRPLL post divider @bestdiv into its P, Q and K factors
 * such that pdiv * qdiv * kdiv == bestdiv, per the hardware's legal
 * combinations (even dividers use K=2 with Q carrying the remainder; small
 * odd dividers map directly to P; 9/15/21 use K=3).
 */
static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	/* even dividers */
	if (bestdiv % 2 == 0) {
		if (bestdiv == 2) {
			*pdiv = 2;
			*qdiv = 1;
			*kdiv = 1;
		} else if (bestdiv % 4 == 0) {
			*pdiv = 2;
			*qdiv = bestdiv / 4;
			*kdiv = 2;
		} else if (bestdiv % 6 == 0) {
			*pdiv = 3;
			*qdiv = bestdiv / 6;
			*kdiv = 2;
		} else if (bestdiv % 5 == 0) {
			*pdiv = 5;
			*qdiv = bestdiv / 10;
			*kdiv = 2;
		} else if (bestdiv % 14 == 0) {
			*pdiv = 7;
			*qdiv = bestdiv / 14;
			*kdiv = 2;
		}
	} else {
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else { /* 9, 15, 21 */
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
	}
}
2181 static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
2182 u32 dco_freq, u32 ref_freq,
2183 int pdiv, int qdiv, int kdiv)
2198 WARN(1, "Incorrect KDiv\n");
2215 WARN(1, "Incorrect PDiv\n");
2218 WARN_ON(kdiv != 2 && qdiv != 1);
2220 params->qdiv_ratio = qdiv;
2221 params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2223 dco = div_u64((u64)dco_freq << 15, ref_freq);
2225 params->dco_integer = dco >> 15;
2226 params->dco_fraction = dco & 0x7fff;
2229 int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
2231 int ref_clock = dev_priv->cdclk.hw.ref;
2234 * For ICL+, the spec states: if reference frequency is 38.4,
2235 * use 19.2 because the DPLL automatically divides that by 2.
2237 if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
2244 cnl_ddi_calculate_wrpll(int clock,
2245 struct drm_i915_private *dev_priv,
2246 struct skl_wrpll_params *wrpll_params)
2248 u32 afe_clock = clock * 5;
2250 u32 dco_min = 7998000;
2251 u32 dco_max = 10000000;
2252 u32 dco_mid = (dco_min + dco_max) / 2;
2253 static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
2254 18, 20, 24, 28, 30, 32, 36, 40,
2255 42, 44, 48, 50, 52, 54, 56, 60,
2256 64, 66, 68, 70, 72, 76, 78, 80,
2257 84, 88, 90, 92, 96, 98, 100, 102,
2258 3, 5, 7, 9, 15, 21 };
2259 u32 dco, best_dco = 0, dco_centrality = 0;
2260 u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2261 int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2263 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2264 dco = afe_clock * dividers[d];
2266 if ((dco <= dco_max) && (dco >= dco_min)) {
2267 dco_centrality = abs(dco - dco_mid);
2269 if (dco_centrality < best_dco_centrality) {
2270 best_dco_centrality = dco_centrality;
2271 best_div = dividers[d];
2280 cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2282 ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
2284 cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock, pdiv, qdiv,
2290 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
2291 struct intel_crtc_state *crtc_state,
2294 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2296 struct skl_wrpll_params wrpll_params = { 0, };
2298 cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2300 if (!cnl_ddi_calculate_wrpll(clock, dev_priv, &wrpll_params))
2303 cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2304 wrpll_params.dco_integer;
2306 cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2307 DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2308 DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2309 DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2310 DPLL_CFGCR1_CENTRAL_FREQ;
2312 memset(&crtc_state->dpll_hw_state, 0,
2313 sizeof(crtc_state->dpll_hw_state));
2315 crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2316 crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
2321 cnl_ddi_dp_set_dpll_hw_state(int clock,
2322 struct intel_dpll_hw_state *dpll_hw_state)
2326 cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
2328 switch (clock / 2) {
2330 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
2333 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
2336 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
2340 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
2343 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
2346 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
2349 /* Some SKUs may require elevated I/O voltage to support this */
2350 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
2353 /* Some SKUs may require elevated I/O voltage to support this */
2354 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
2358 dpll_hw_state->cfgcr0 = cfgcr0;
2362 static struct intel_shared_dpll *
2363 cnl_get_dpll(struct intel_crtc_state *crtc_state,
2364 struct intel_encoder *encoder)
2366 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
2367 struct intel_shared_dpll *pll;
2368 int clock = crtc_state->port_clock;
2370 struct intel_dpll_hw_state dpll_hw_state;
2372 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
2374 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
2375 bret = cnl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
2377 DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
2380 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
2381 bret = cnl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
2383 DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
2386 crtc_state->dpll_hw_state = dpll_hw_state;
2388 DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
2389 crtc_state->output_types);
2393 pll = intel_find_shared_dpll(crtc_state,
2397 DRM_DEBUG_KMS("No PLL selected\n");
2401 intel_reference_shared_dpll(pll, crtc_state);
2406 static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
2407 struct intel_dpll_hw_state *hw_state)
2409 DRM_DEBUG_KMS("dpll_hw_state: "
2410 "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
2415 static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
2416 .enable = cnl_ddi_pll_enable,
2417 .disable = cnl_ddi_pll_disable,
2418 .get_hw_state = cnl_ddi_pll_get_hw_state,
2421 static const struct dpll_info cnl_plls[] = {
2422 { "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2423 { "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2424 { "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2428 static const struct intel_dpll_mgr cnl_pll_mgr = {
2429 .dpll_info = cnl_plls,
2430 .get_dpll = cnl_get_dpll,
2431 .dump_hw_state = cnl_dump_hw_state,
/*
 * These values are already adjusted: they're the bits we write to the
 * registers, not the logical values.
 */
2438 static const struct skl_wrpll_params icl_dp_combo_pll_24MHz_values[] = {
2439 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */
2440 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
2441 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */
2442 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
2443 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */
2444 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
2445 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */
2446 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
2447 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */
2448 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2},
2449 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */
2450 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
2451 { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */
2452 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
2453 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */
2454 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
2457 /* Also used for 38.4 MHz values. */
2458 static const struct skl_wrpll_params icl_dp_combo_pll_19_2MHz_values[] = {
2459 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */
2460 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
2461 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */
2462 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
2463 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */
2464 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
2465 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */
2466 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
2467 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */
2468 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2},
2469 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */
2470 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
2471 { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */
2472 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
2473 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */
2474 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
2477 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2478 .dco_integer = 0x151, .dco_fraction = 0x4000,
2479 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2482 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2483 .dco_integer = 0x1A5, .dco_fraction = 0x7000,
2484 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2487 static bool icl_calc_dp_combo_pll(struct drm_i915_private *dev_priv, int clock,
2488 struct skl_wrpll_params *pll_params)
2490 const struct skl_wrpll_params *params;
2492 params = dev_priv->cdclk.hw.ref == 24000 ?
2493 icl_dp_combo_pll_24MHz_values :
2494 icl_dp_combo_pll_19_2MHz_values;
2498 *pll_params = params[0];
2501 *pll_params = params[1];
2504 *pll_params = params[2];
2507 *pll_params = params[3];
2510 *pll_params = params[4];
2513 *pll_params = params[5];
2516 *pll_params = params[6];
2519 *pll_params = params[7];
2522 MISSING_CASE(clock);
2529 static bool icl_calc_tbt_pll(struct drm_i915_private *dev_priv, int clock,
2530 struct skl_wrpll_params *pll_params)
2532 *pll_params = dev_priv->cdclk.hw.ref == 24000 ?
2533 icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;
2537 static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
2538 struct intel_encoder *encoder, int clock,
2539 struct intel_dpll_hw_state *pll_state)
2541 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2543 struct skl_wrpll_params pll_params = { 0 };
2546 if (intel_port_is_tc(dev_priv, encoder->port))
2547 ret = icl_calc_tbt_pll(dev_priv, clock, &pll_params);
2548 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
2549 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
2550 ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params);
2552 ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params);
2557 cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params.dco_fraction) |
2558 pll_params.dco_integer;
2560 cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
2561 DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
2562 DPLL_CFGCR1_KDIV(pll_params.kdiv) |
2563 DPLL_CFGCR1_PDIV(pll_params.pdiv) |
2564 DPLL_CFGCR1_CENTRAL_FREQ_8400;
2566 pll_state->cfgcr0 = cfgcr0;
2567 pll_state->cfgcr1 = cfgcr1;
2571 int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
2575 u32 pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction;
2576 const struct skl_wrpll_params *params;
2577 int index, n_entries, link_clock;
2579 /* Read back values from DPLL CFGCR registers */
2580 cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
2581 cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
2583 dco_integer = cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK;
2584 dco_fraction = (cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2585 DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2586 pdiv = (cfgcr1 & DPLL_CFGCR1_PDIV_MASK) >> DPLL_CFGCR1_PDIV_SHIFT;
2587 kdiv = (cfgcr1 & DPLL_CFGCR1_KDIV_MASK) >> DPLL_CFGCR1_KDIV_SHIFT;
2588 qdiv_mode = (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) >>
2589 DPLL_CFGCR1_QDIV_MODE_SHIFT;
2590 qdiv_ratio = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2591 DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2593 params = dev_priv->cdclk.hw.ref == 24000 ?
2594 icl_dp_combo_pll_24MHz_values :
2595 icl_dp_combo_pll_19_2MHz_values;
2596 n_entries = ARRAY_SIZE(icl_dp_combo_pll_24MHz_values);
2598 for (index = 0; index < n_entries; index++) {
2599 if (dco_integer == params[index].dco_integer &&
2600 dco_fraction == params[index].dco_fraction &&
2601 pdiv == params[index].pdiv &&
2602 kdiv == params[index].kdiv &&
2603 qdiv_mode == params[index].qdiv_mode &&
2604 qdiv_ratio == params[index].qdiv_ratio)
2608 /* Map PLL Index to Link Clock */
2611 MISSING_CASE(index);
2614 link_clock = 540000;
2617 link_clock = 270000;
2620 link_clock = 162000;
2623 link_clock = 324000;
2626 link_clock = 216000;
2629 link_clock = 432000;
2632 link_clock = 648000;
2635 link_clock = 810000;
2642 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
2644 return id - DPLL_ID_ICL_MGPLL1;
2647 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
2649 return tc_port + DPLL_ID_ICL_MGPLL1;
2652 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
2653 u32 *target_dco_khz,
2654 struct intel_dpll_hw_state *state)
2656 u32 dco_min_freq, dco_max_freq;
2657 int div1_vals[] = {7, 5, 3, 2};
2661 dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
2662 dco_max_freq = is_dp ? 8100000 : 10000000;
2664 for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
2665 int div1 = div1_vals[i];
2667 for (div2 = 10; div2 > 0; div2--) {
2668 int dco = div1 * div2 * clock_khz * 5;
2669 int a_divratio, tlinedrv, inputsel;
2672 if (dco < dco_min_freq || dco > dco_max_freq)
2676 a_divratio = is_dp ? 10 : 5;
2682 inputsel = is_dp ? 0 : 1;
2689 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
2692 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
2695 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
2698 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
2702 *target_dco_khz = dco;
2704 state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
2706 state->mg_clktop2_coreclkctl1 =
2707 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
2709 state->mg_clktop2_hsclkctl =
2710 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
2711 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
2713 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
/*
 * Compute the full MG PLL (Type-C) hardware state for the given port clock.
 * Fills every mg_* field of @pll_state plus the refclk-dependent write masks.
 * Returns false if no suitable divisors or feedback divider can be found.
 */
2723 * The specification for this function uses real numbers, so the math had to be
2724 * adapted to integer-only calculation, that's why it looks so different.
2726 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2727 struct intel_encoder *encoder, int clock,
2728 struct intel_dpll_hw_state *pll_state)
2730 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
/* Reference clock in kHz, taken from the current cdclk hw state. */
2731 int refclk_khz = dev_priv->cdclk.hw.ref;
2732 u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2733 u32 iref_ndiv, iref_trim, iref_pulse_w;
2734 u32 prop_coeff, int_coeff;
2735 u32 tdc_targetcnt, feedfwgain;
2736 u64 ssc_stepsize, ssc_steplen, ssc_steplog;
/* SSC is currently never enabled here; kept as a variable for the formulas. */
2738 bool use_ssc = false;
2739 bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2741 if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2743 DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
/* Integer part of the feedback divider; must fit in 8 bits.
 * NOTE(review): m1div is assigned on an elided line before this point. */
2748 m2div_int = dco_khz / (refclk_khz * m1div);
2749 if (m2div_int > 255) {
/* Retried with a different m1div (elided); give up if still too large. */
2751 m2div_int = dco_khz / (refclk_khz * m1div);
2752 if (m2div_int > 255) {
2753 DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
2758 m2div_rem = dco_khz % (refclk_khz * m1div);
/* Fractional feedback divider in 2^22 fixed point. */
2760 tmp = (u64)m2div_rem * (1 << 22);
2761 do_div(tmp, refclk_khz * m1div);
/* Refclk-dependent iref/coefficient lookup; case bodies elided here. */
2764 switch (refclk_khz) {
2781 MISSING_CASE(refclk_khz);
2786 * tdc_res = 0.000003
2787 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2789 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2790 * was supposed to be a division, but we rearranged the operations of
2791 * the formula to avoid early divisions so we don't multiply the
2794 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2795 * we also rearrange to work with integers.
2797 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2798 * last division by 10.
2800 tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2803 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2804 * 32 bits. That's not a problem since we round the division down
2807 feedfwgain = (use_ssc || m2div_rem > 0) ?
2808 m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2810 if (dco_khz >= 9000000) {
/* SSC step size/length in the hardware's fixed-point encodings. */
2819 tmp = (u64)dco_khz * 47 * 32;
2820 do_div(tmp, refclk_khz * m1div * 10000);
2823 tmp = (u64)dco_khz * 1000;
2824 ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
/* Pack the computed values into the MG PLL register images. */
2831 pll_state->mg_pll_div0 = (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2832 MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2833 MG_PLL_DIV0_FBDIV_INT(m2div_int);
2835 pll_state->mg_pll_div1 = MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2836 MG_PLL_DIV1_DITHER_DIV_2 |
2837 MG_PLL_DIV1_NDIVRATIO(1) |
2838 MG_PLL_DIV1_FBPREDIV(m1div);
2840 pll_state->mg_pll_lf = MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2841 MG_PLL_LF_AFCCNTSEL_512 |
2842 MG_PLL_LF_GAINCTRL(1) |
2843 MG_PLL_LF_INT_COEFF(int_coeff) |
2844 MG_PLL_LF_PROP_COEFF(prop_coeff);
2846 pll_state->mg_pll_frac_lock = MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
2847 MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
2848 MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
2849 MG_PLL_FRAC_LOCK_DCODITHEREN |
2850 MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
2851 if (use_ssc || m2div_rem > 0)
2852 pll_state->mg_pll_frac_lock |= MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
2854 pll_state->mg_pll_ssc = (use_ssc ? MG_PLL_SSC_EN : 0) |
2855 MG_PLL_SSC_TYPE(2) |
2856 MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
2857 MG_PLL_SSC_STEPNUM(ssc_steplog) |
2859 MG_PLL_SSC_STEPSIZE(ssc_stepsize);
2861 pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART |
2862 MG_PLL_TDC_COLDST_IREFINT_EN |
2863 MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
2864 MG_PLL_TDC_TDCOVCCORR_EN |
2865 MG_PLL_TDC_TDCSEL(3);
2867 pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
2868 MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
2869 MG_PLL_BIAS_BIAS_BONUS(10) |
2870 MG_PLL_BIAS_BIASCAL_EN |
2871 MG_PLL_BIAS_CTRIM(12) |
2872 MG_PLL_BIAS_VREF_RDAC(4) |
2873 MG_PLL_BIAS_IREFTRIM(iref_trim);
/* With a 38.4 MHz refclk only the COLDSTART bit of tdc_coldst_bias (and
 * nothing of bias) may be written; the masks drive the RMW in
 * icl_mg_pll_write() and the readout in mg_pll_get_hw_state(). */
2875 if (refclk_khz == 38400) {
2876 pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
2877 pll_state->mg_pll_bias_mask = 0;
2879 pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
2880 pll_state->mg_pll_bias_mask = -1U;
2883 pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask;
2884 pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
/*
 * ICL implementation of the dpll_mgr get_dpll() hook: compute the PLL state
 * for the CRTC/encoder combination, pick the PLL id range by port type
 * (combo PHY -> DPLL0/1, TC TBT mode -> TBT PLL, TC native -> that port's
 * MG PLL), then find and reference a shared PLL in that range.
 */
2889 static struct intel_shared_dpll *
2890 icl_get_dpll(struct intel_crtc_state *crtc_state,
2891 struct intel_encoder *encoder)
2893 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
2894 struct intel_digital_port *intel_dig_port;
2895 struct intel_shared_dpll *pll;
2896 struct intel_dpll_hw_state pll_state = {};
2897 enum port port = encoder->port;
2898 enum intel_dpll_id min, max;
2899 int clock = crtc_state->port_clock;
2902 if (intel_port_is_combophy(dev_priv, port)) {
2903 min = DPLL_ID_ICL_DPLL0;
2904 max = DPLL_ID_ICL_DPLL1;
2905 ret = icl_calc_dpll_state(crtc_state, encoder, clock,
2907 } else if (intel_port_is_tc(dev_priv, port)) {
/* MST encoders wrap the primary digital port; unwrap to read tc_type. */
2908 if (encoder->type == INTEL_OUTPUT_DP_MST) {
2909 struct intel_dp_mst_encoder *mst_encoder;
2911 mst_encoder = enc_to_mst(&encoder->base);
2912 intel_dig_port = mst_encoder->primary;
2914 intel_dig_port = enc_to_dig_port(&encoder->base);
2917 if (intel_dig_port->tc_type == TC_PORT_TBT) {
2918 min = DPLL_ID_ICL_TBTPLL;
2920 ret = icl_calc_dpll_state(crtc_state, encoder, clock,
2923 enum tc_port tc_port;
2925 tc_port = intel_port_to_tc(dev_priv, port);
/* Each TC port has a dedicated MG PLL, so min == max here. */
2926 min = icl_tc_port_to_pll_id(tc_port);
2928 ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
2937 DRM_DEBUG_KMS("Could not calculate PLL state.\n");
2941 crtc_state->dpll_hw_state = pll_state;
2943 pll = intel_find_shared_dpll(crtc_state, min, max);
2945 DRM_DEBUG_KMS("No PLL selected\n");
/* Stage the crtc -> pll reference in the atomic state. */
2949 intel_reference_shared_dpll(pll, crtc_state);
/*
 * Read back the hardware state of an MG (Type-C) PLL into @hw_state.
 * Returns false if the PLL power domain cannot be grabbed or the PLL is
 * disabled. Reserved bits are masked out so the result is comparable with
 * the software-computed state from icl_calc_mg_pll_state().
 */
2954 static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
2955 struct intel_shared_dpll *pll,
2956 struct intel_dpll_hw_state *hw_state)
2958 const enum intel_dpll_id id = pll->info->id;
2959 enum tc_port tc_port = icl_pll_id_to_tc_port(id);
2960 intel_wakeref_t wakeref;
/* Only proceed if the PLL power domain is already enabled. */
2964 wakeref = intel_display_power_get_if_enabled(dev_priv,
2969 val = I915_READ(MG_PLL_ENABLE(tc_port));
2970 if (!(val & PLL_ENABLE))
2973 hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
2974 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
2976 hw_state->mg_clktop2_coreclkctl1 =
2977 I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
2978 hw_state->mg_clktop2_coreclkctl1 &=
2979 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
2981 hw_state->mg_clktop2_hsclkctl =
2982 I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
2983 hw_state->mg_clktop2_hsclkctl &=
2984 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
2985 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
2986 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
2987 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
2989 hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
2990 hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
2991 hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
2992 hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
2993 hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
2995 hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
2996 hw_state->mg_pll_tdc_coldst_bias =
2997 I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
/* Mirror the refclk-dependent masking done in icl_calc_mg_pll_state() so
 * state comparison works with a 38.4 MHz reference clock. */
2999 if (dev_priv->cdclk.hw.ref == 38400) {
3000 hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
3001 hw_state->mg_pll_bias_mask = 0;
3003 hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
3004 hw_state->mg_pll_bias_mask = -1U;
3007 hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
3008 hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
3012 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
/*
 * Common readout for combo/TBT PLLs: check @enable_reg for PLL_ENABLE and
 * read back CFGCR0/CFGCR1. Returns false if the power domain cannot be
 * grabbed or the PLL is disabled.
 */
3016 static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
3017 struct intel_shared_dpll *pll,
3018 struct intel_dpll_hw_state *hw_state,
3019 i915_reg_t enable_reg)
3021 const enum intel_dpll_id id = pll->info->id;
3022 intel_wakeref_t wakeref;
3026 wakeref = intel_display_power_get_if_enabled(dev_priv,
3031 val = I915_READ(enable_reg);
3032 if (!(val & PLL_ENABLE))
3035 hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
3036 hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
3040 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
/* get_hw_state hook for combo-PHY PLLs: the enable register is per-PLL-id. */
3044 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3045 struct intel_shared_dpll *pll,
3046 struct intel_dpll_hw_state *hw_state)
3048 return icl_pll_get_hw_state(dev_priv, pll, hw_state,
3049 CNL_DPLL_ENABLE(pll->info->id));
/* get_hw_state hook for the Thunderbolt PLL: single fixed enable register. */
3052 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3053 struct intel_shared_dpll *pll,
3054 struct intel_dpll_hw_state *hw_state)
3056 return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
/*
 * Program CFGCR0/CFGCR1 for a combo/TBT PLL from the cached software state.
 * The posting read flushes the writes before the PLL is enabled.
 */
3059 static void icl_dpll_write(struct drm_i915_private *dev_priv,
3060 struct intel_shared_dpll *pll)
3062 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3063 const enum intel_dpll_id id = pll->info->id;
3065 I915_WRITE(ICL_DPLL_CFGCR0(id), hw_state->cfgcr0);
3066 I915_WRITE(ICL_DPLL_CFGCR1(id), hw_state->cfgcr1);
3067 POSTING_READ(ICL_DPLL_CFGCR1(id));
/*
 * Program all MG PLL registers for a Type-C port from the cached software
 * state. Registers with reserved fields are updated read-modify-write using
 * either fixed masks or the refclk-dependent masks computed during
 * calc/readout.
 */
3070 static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
3071 struct intel_shared_dpll *pll)
3073 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3074 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
3078 * Some of the following registers have reserved fields, so program
3079 * these with RMW based on a mask. The mask can be fixed or generated
3080 * during the calc/readout phase if the mask depends on some other HW
3081 * state like refclk, see icl_calc_mg_pll_state().
3083 val = I915_READ(MG_REFCLKIN_CTL(tc_port));
3084 val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
3085 val |= hw_state->mg_refclkin_ctl;
3086 I915_WRITE(MG_REFCLKIN_CTL(tc_port), val);
3088 val = I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
3089 val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
3090 val |= hw_state->mg_clktop2_coreclkctl1;
3091 I915_WRITE(MG_CLKTOP2_CORECLKCTL1(tc_port), val);
3093 val = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
3094 val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
3095 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
3096 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
3097 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
3098 val |= hw_state->mg_clktop2_hsclkctl;
3099 I915_WRITE(MG_CLKTOP2_HSCLKCTL(tc_port), val);
/* These registers have no reserved fields; write them whole. */
3101 I915_WRITE(MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
3102 I915_WRITE(MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
3103 I915_WRITE(MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
3104 I915_WRITE(MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock);
3105 I915_WRITE(MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
/* Bias/coldst masks come from icl_calc_mg_pll_state() (refclk-dependent). */
3107 val = I915_READ(MG_PLL_BIAS(tc_port));
3108 val &= ~hw_state->mg_pll_bias_mask;
3109 val |= hw_state->mg_pll_bias;
3110 I915_WRITE(MG_PLL_BIAS(tc_port), val);
3112 val = I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
3113 val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
3114 val |= hw_state->mg_pll_tdc_coldst_bias;
3115 I915_WRITE(MG_PLL_TDC_COLDST_BIAS(tc_port), val);
/* Flush all writes before the caller enables the PLL. */
3117 POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
/*
 * Set PLL_POWER_ENABLE in @enable_reg and wait (1 ms budget) for the
 * hardware to report PLL_POWER_STATE. Logs an error on timeout but does
 * not propagate it — the enable sequence continues regardless.
 */
3120 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
3121 struct intel_shared_dpll *pll,
3122 i915_reg_t enable_reg)
3126 val = I915_READ(enable_reg);
3127 val |= PLL_POWER_ENABLE;
3128 I915_WRITE(enable_reg, val);
3131 * The spec says we need to "wait" but it also says it should be
3134 if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE,
3135 PLL_POWER_STATE, 1))
3136 DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
/*
 * Set PLL_ENABLE in @enable_reg and wait for PLL_LOCK. NOTE(review): the
 * line setting PLL_ENABLE into val is elided from this view (between the
 * read and the write) — confirm against the full file.
 */
3139 static void icl_pll_enable(struct drm_i915_private *dev_priv,
3140 struct intel_shared_dpll *pll,
3141 i915_reg_t enable_reg)
3145 val = I915_READ(enable_reg);
3147 I915_WRITE(enable_reg, val);
3149 /* Timeout is actually 600us. */
3150 if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, PLL_LOCK,
3152 DRM_ERROR("PLL %d not locked\n", pll->info->id);
/*
 * Enable sequence for a combo-PHY PLL: power up, program CFGCR registers,
 * then enable and wait for lock. DVFS steps are handled by the cdclk code.
 */
3155 static void combo_pll_enable(struct drm_i915_private *dev_priv,
3156 struct intel_shared_dpll *pll)
3158 i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3160 icl_pll_power_enable(dev_priv, pll, enable_reg);
3162 icl_dpll_write(dev_priv, pll);
3165 * DVFS pre sequence would be here, but in our driver the cdclk code
3166 * paths should already be setting the appropriate voltage, hence we do
3170 icl_pll_enable(dev_priv, pll, enable_reg);
3172 /* DVFS post sequence would be here. See the comment above. */
/*
 * Enable sequence for the Thunderbolt PLL — same shape as combo_pll_enable()
 * but with the fixed TBT_PLL_ENABLE register.
 */
3175 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
3176 struct intel_shared_dpll *pll)
3178 icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
3180 icl_dpll_write(dev_priv, pll);
3183 * DVFS pre sequence would be here, but in our driver the cdclk code
3184 * paths should already be setting the appropriate voltage, hence we do
3188 icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
3190 /* DVFS post sequence would be here. See the comment above. */
/*
 * Enable sequence for an MG (Type-C) PLL: power up, program the MG register
 * set via RMW, then enable and wait for lock.
 */
3193 static void mg_pll_enable(struct drm_i915_private *dev_priv,
3194 struct intel_shared_dpll *pll)
3196 i915_reg_t enable_reg =
3197 MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3199 icl_pll_power_enable(dev_priv, pll, enable_reg);
3201 icl_mg_pll_write(dev_priv, pll);
3204 * DVFS pre sequence would be here, but in our driver the cdclk code
3205 * paths should already be setting the appropriate voltage, hence we do
3209 icl_pll_enable(dev_priv, pll, enable_reg);
3211 /* DVFS post sequence would be here. See the comment above. */
/*
 * Common disable sequence: clear PLL_ENABLE, wait for lock loss, then clear
 * PLL_POWER_ENABLE and wait for the power state to drop. Timeouts are only
 * logged. NOTE(review): the line clearing PLL_ENABLE from val is elided
 * from this view — confirm against the full file.
 */
3214 static void icl_pll_disable(struct drm_i915_private *dev_priv,
3215 struct intel_shared_dpll *pll,
3216 i915_reg_t enable_reg)
3220 /* The first steps are done by intel_ddi_post_disable(). */
3223 * DVFS pre sequence would be here, but in our driver the cdclk code
3224 * paths should already be setting the appropriate voltage, hence we do
3228 val = I915_READ(enable_reg);
3230 I915_WRITE(enable_reg, val);
3232 /* Timeout is actually 1us. */
3233 if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, 0, 1))
3234 DRM_ERROR("PLL %d locked\n", pll->info->id);
3236 /* DVFS post sequence would be here. See the comment above. */
3238 val = I915_READ(enable_reg);
3239 val &= ~PLL_POWER_ENABLE;
3240 I915_WRITE(enable_reg, val);
3243 * The spec says we need to "wait" but it also says it should be
3246 if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE, 0,
3248 DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
/* Disable hook for combo-PHY PLLs: per-id enable register. */
3251 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3252 struct intel_shared_dpll *pll)
3254 icl_pll_disable(dev_priv, pll, CNL_DPLL_ENABLE(pll->info->id));
/* Disable hook for the Thunderbolt PLL. */
3257 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
3258 struct intel_shared_dpll *pll)
3260 icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
/* Disable hook for MG (Type-C) PLLs: per-TC-port enable register. */
3263 static void mg_pll_disable(struct drm_i915_private *dev_priv,
3264 struct intel_shared_dpll *pll)
3266 i915_reg_t enable_reg =
3267 MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3269 icl_pll_disable(dev_priv, pll, enable_reg);
3272 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3273 struct intel_dpll_hw_state *hw_state)
3275 DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
3276 "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3277 "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3278 "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3279 "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3280 "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3281 hw_state->cfgcr0, hw_state->cfgcr1,
3282 hw_state->mg_refclkin_ctl,
3283 hw_state->mg_clktop2_coreclkctl1,
3284 hw_state->mg_clktop2_hsclkctl,
3285 hw_state->mg_pll_div0,
3286 hw_state->mg_pll_div1,
3287 hw_state->mg_pll_lf,
3288 hw_state->mg_pll_frac_lock,
3289 hw_state->mg_pll_ssc,
3290 hw_state->mg_pll_bias,
3291 hw_state->mg_pll_tdc_coldst_bias);
/* Hook table for combo-PHY PLLs (DPLL0/DPLL1). */
3294 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
3295 .enable = combo_pll_enable,
3296 .disable = combo_pll_disable,
3297 .get_hw_state = combo_pll_get_hw_state,
/* Hook table for the Thunderbolt PLL. */
3300 static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
3301 .enable = tbt_pll_enable,
3302 .disable = tbt_pll_disable,
3303 .get_hw_state = tbt_pll_get_hw_state,
/* Hook table for MG (Type-C) PLLs. */
3306 static const struct intel_shared_dpll_funcs mg_pll_funcs = {
3307 .enable = mg_pll_enable,
3308 .disable = mg_pll_disable,
3309 .get_hw_state = mg_pll_get_hw_state,
/*
 * All PLLs on ICL; array index must equal each entry's dpll id (enforced by
 * the WARN_ON in intel_shared_dpll_init()).
 */
3312 static const struct dpll_info icl_plls[] = {
3313 { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
3314 { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
3315 { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
3316 { "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
3317 { "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
3318 { "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
3319 { "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
/* Platform PLL manager for ICL (gen11+); selected in intel_shared_dpll_init(). */
3323 static const struct intel_dpll_mgr icl_pll_mgr = {
3324 .dpll_info = icl_plls,
3325 .get_dpll = icl_get_dpll,
3326 .dump_hw_state = icl_dump_hw_state,
3330 * intel_shared_dpll_init - Initialize shared DPLLs
3333 * Initialize shared DPLLs for @dev.
3335 void intel_shared_dpll_init(struct drm_device *dev)
3337 struct drm_i915_private *dev_priv = to_i915(dev);
3338 const struct intel_dpll_mgr *dpll_mgr = NULL;
3339 const struct dpll_info *dpll_info;
/* Pick the PLL manager for this platform; newest platforms checked first. */
3342 if (INTEL_GEN(dev_priv) >= 11)
3343 dpll_mgr = &icl_pll_mgr;
3344 else if (IS_CANNONLAKE(dev_priv))
3345 dpll_mgr = &cnl_pll_mgr;
3346 else if (IS_GEN9_BC(dev_priv))
3347 dpll_mgr = &skl_pll_mgr;
3348 else if (IS_GEN9_LP(dev_priv))
3349 dpll_mgr = &bxt_pll_mgr;
3350 else if (HAS_DDI(dev_priv))
3351 dpll_mgr = &hsw_pll_mgr;
3352 else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
3353 dpll_mgr = &pch_pll_mgr;
/* Platforms without shared DPLLs end up with zero PLLs registered. */
3356 dev_priv->num_shared_dpll = 0;
3360 dpll_info = dpll_mgr->dpll_info;
/* Register each PLL; table order must match the dpll id enumeration. */
3362 for (i = 0; dpll_info[i].name; i++) {
3363 WARN_ON(i != dpll_info[i].id);
3364 dev_priv->shared_dplls[i].info = &dpll_info[i];
3367 dev_priv->dpll_mgr = dpll_mgr;
3368 dev_priv->num_shared_dpll = i;
3369 mutex_init(&dev_priv->dpll_lock);
3371 BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
3373 /* FIXME: Move this to a more suitable place */
3374 if (HAS_DDI(dev_priv))
3375 intel_ddi_pll_init(dev);
3379 * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination
3380 * @crtc_state: atomic state for the crtc
3383 * Find an appropriate DPLL for the given CRTC and encoder combination. A
3384 * reference from the @crtc_state to the returned pll is registered in the
3385 * atomic state. That configuration is made effective by calling
3386 * intel_shared_dpll_swap_state(). The reference should be released by calling
3387 * intel_release_shared_dpll().
3390 * A shared DPLL to be used by @crtc_state and @encoder.
3392 struct intel_shared_dpll *
3393 intel_get_shared_dpll(struct intel_crtc_state *crtc_state,
3394 struct intel_encoder *encoder)
3396 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3397 const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
/* No manager means this platform has no shared DPLLs; caller gets NULL. */
3399 if (WARN_ON(!dpll_mgr))
/* Delegate to the platform-specific selection hook. */
3402 return dpll_mgr->get_dpll(crtc_state, encoder);
3406 * intel_release_shared_dpll - end use of DPLL by CRTC in atomic state
3407 * @dpll: dpll in use by @crtc
3409 * @state: atomic state
3411 * This function releases the reference from @crtc to @dpll from the
3412 * atomic @state. The new configuration is made effective by calling
3413 * intel_shared_dpll_swap_state().
3415 void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
3416 struct intel_crtc *crtc,
3417 struct drm_atomic_state *state)
3419 struct intel_shared_dpll_state *shared_dpll_state;
3421 shared_dpll_state = intel_atomic_get_shared_dpll_state(state);
/* Drop this CRTC's bit from the staged crtc_mask of the PLL's state. */
3422 shared_dpll_state[dpll->info->id].crtc_mask &= ~(1 << crtc->pipe);
3426 * intel_shared_dpll_dump_hw_state - write hw_state to dmesg
3427 * @dev_priv: i915 drm device
3428 * @hw_state: hw state to be written to the log
3430 * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
3432 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
3433 struct intel_dpll_hw_state *hw_state)
3435 if (dev_priv->dpll_mgr) {
3436 dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
3438 /* fallback for platforms that don't use the shared dpll
3441 DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
3442 "fp0: 0x%x, fp1: 0x%x\n",